Aristo db implement distributed backend access (#1688)
* Fix hashing algorithm
  why: Particular case where a sub-tree is on the backend, linked by an
    Extension vertex to the top level.

* Update backend verification to report `dirty` top layer

* Implement distributed merge of backend filters

* Implement distributed backend access management
  details: Implemented and tested as described in chapter 5 of the
    `README.md` file.
This commit is contained in:
parent 3f0506b5bc, commit 3078c207ca
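Taken together, the bullets above describe one workflow. Below is a minimal
sketch (not part of the diff) of the distributed access cycle, wired from the
APIs added in this commit; the descriptor name `db`, the import path, and an
already opened persistent backend are assumptions::

    # Sketch only: `db` is an assumed AristoDbRef with a backend attached;
    # adjust the import path to your location in the repo.
    import ../../nimbus/db/aristo/[aristo_desc, aristo_filter, aristo_tx]

    proc distributedCycle(db: AristoDbRef) =
      let clone = db.copyCat.value       # spawn a read-only peer descriptor
      # ... run queries or checks on `clone` while `db` keeps writing ...
      doAssert not clone.canResolveBE()  # a ro peer cannot write the backend
      doAssert clone.ackqRwMode().isOk   # promote `clone`, demote `db`
      doAssert clone.resolveBE().isOk    # flush its filter into the backend
      doAssert db.dispose().isOk         # retire the now read-only `db`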
@@ -349,7 +349,7 @@ should be allocated in the structural table associated with the zero key.
   db | stack[n] |   |
 desc |    :     |   | optional passive delta layers, handled by
  obj | stack[1] |   | transaction management (can be used to
-     | stack[0] |   | successively replace the top layer)
+     | stack[0] |   | successively recover the top layer)
      +----------+   v
      +----------+
      | roFilter |     optional read-only backend filter
@@ -449,11 +449,15 @@ Nevertheless, *(8)* can also be transformed by committing and saving *tx2*
   | ø, ø       | tx2+PBE
   | tx3, ~tx2  |

 As *(11)* and *(13)* represent the same API, one has

-  tx2+PBE == tx1+(tx2+~tx1)+PBE    because of the middle rows    (14)
-  ~tx2    == ~tx1+~(tx2+~tx1)      because of (14)               (15)
+  tx2+PBE =~ tx1+(tx2+~tx1)+PBE    because of the middle rows    (14)
+  ~tx2    =~ ~tx1+~(tx2+~tx1)      because of (14)               (15)

-which shows some distributive property in *(14)* and commutative property in
-*(15)* for this example. In particulat it might be handy for testing/verifying
-against this example.
+which looks like some distributive property in *(14)* and a commutative
+property in *(15)* for this example (though it is not strictly algebraic.)
+The *=~* operator above indicates that the representations are equivalent in
+the sense that they have the same effect on the backend database (a bit like
+residue classes.)
+
+It might be handy for testing/verifying an implementation using this example.
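Because *=~* only compares backend effects, a test can decide it by comparing
the state root key each side leaves behind. A hedged sketch; the helper name
is hypothetical, and using vertex ID 1 as the state root slot follows the
convention used elsewhere in this commit::

    proc sameBackendEffect(a, b: AristoDbRef): bool =
      ## Hypothetical helper: both sides of (14), applied to twin
      ## descriptors, must agree on the filtered backend state root.
      let (x, y) = (a.getKeyBE VertexID(1), b.getKeyBE VertexID(1))
      x.isOk and y.isOk and x.value == y.value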
@@ -129,6 +129,9 @@ proc checkBE*[T: RdbBackendRef|MemBackendRef|VoidBackendRef](

   # Check cache against backend
   if cache:
+    if db.top.dirty:
+      return err((VertexID(0),CheckBeCacheIsDirty))
+
     # Check structural table
     for (vid,vtx) in db.top.sTab.pairs:
       # A `kMap[]` entry must exist.
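In effect, cached-mode verification now fails fast on a modified top layer
instead of comparing stale hashes against the backend. A hedged usage
sketch::

    # If the top layer was changed without re-hashing, the cached check
    # is expected to bail out with `CheckBeCacheIsDirty`.
    if db.top.dirty:
      doAssert db.checkBE(cache = true).isErr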
@@ -200,7 +200,7 @@ proc ppSTab(
   "{" & sTab.sortedKeys
     .mapIt((it, sTab.getOrVoid it))
     .mapIt("(" & it[0].ppVid & "," & it[1].ppVtx(db,it[0]) & ")")
-    .join("," & indent.toPfx(1)) & "}"
+    .join(indent.toPfx(2)) & "}"

 proc ppLTab(
     lTab: Table[LeafTie,VertexID];
@@ -210,7 +210,7 @@ proc ppLTab(
   "{" & lTab.sortedKeys
     .mapIt((it, lTab.getOrVoid it))
     .mapIt("(" & it[0].ppLeafTie(db) & "," & it[1].ppVid & ")")
-    .join("," & indent.toPfx(1)) & "}"
+    .join(indent.toPfx(2)) & "}"

 proc ppPPrf(pPrf: HashSet[VertexID]): string =
   "{" & pPrf.sortedKeys.mapIt(it.ppVid).join(",") & "}"
@@ -324,9 +324,11 @@ proc ppFilter(fl: AristoFilterRef; db: AristoDbRef; indent: int): string =
     pfx1 = indent.toPfx(1)
     pfx2 = indent.toPfx(2)
   result = "<filter>"
-  if db.roFilter.isNil:
+  if fl.isNil:
     result &= " n/a"
     return
+  result &= pfx & "trg(" & fl.trg.ppKey & ")"
+  result &= pfx & "src(" & fl.src.ppKey & ")"
   result &= pfx & "vGen" & pfx1 & "["
   if fl.vGen.isSome:
     result &= fl.vGen.unsafeGet.mapIt(it.ppVid).join(",")
@@ -361,7 +363,7 @@ proc ppBeOnly[T](be: T; db: AristoDbRef; indent: int): string =

 proc ppBe[T](be: T; db: AristoDbRef; indent: int): string =
   ## backend + filter
-  db.roFilter.ppFilter(db, indent) & indent.toPfx & be.ppBeOnly(db,indent)
+  db.roFilter.ppFilter(db, indent+1) & indent.toPfx & be.ppBeOnly(db,indent+1)

 proc ppLayer(
     layer: AristoLayerRef;
@@ -374,8 +376,8 @@ proc ppLayer(
     indent = 4;
       ): string =
   let
-    pfx1 = indent.toPfx
-    pfx2 = indent.toPfx(1)
+    pfx1 = indent.toPfx(1)
+    pfx2 = indent.toPfx(2)
     nOKs = sTabOk.ord + lTabOk.ord + kMapOk.ord + pPrfOk.ord + vGenOk.ord
     tagOk = 1 < nOKs
   var
@@ -392,6 +394,8 @@ proc ppLayer(
       rc

   if not layer.isNil:
+    if 2 < nOKs:
+      result &= "<layer>".doPrefix(false)
     if vGenOk:
       let
         tLen = layer.vGen.len
@@ -613,6 +617,12 @@ proc pp*(
       ): string =
   db.top.pp(db, xTabOk=xTabOk, kMapOk=kMapOk, other=other, indent=indent)

+proc pp*(
+    filter: AristoFilterRef;
+    db = AristoDbRef();
+    indent = 4;
+      ): string =
+  filter.ppFilter(db, indent)

 proc pp*(
     be: TypedBackendRef;
@@ -11,7 +11,7 @@
 ## Aristo DB -- a Patricia Trie with labeled edges
 ## ===============================================
 ##
-## These data structures allows to overlay the *Patricia Trie* with *Merkel
+## These data structures allow to overlay the *Patricia Trie* with *Merkle
 ## Trie* hashes. See the `README.md` in the `aristo` folder for documentation.
 ##
 ## Some semantic explanations;
@@ -22,7 +22,7 @@
 {.push raises: [].}

 import
-  std/tables,
+  std/[hashes, sets, tables],
   eth/common,
   ./aristo_constants,
   ./aristo_desc/[
@@ -31,8 +31,8 @@ import
 from ./aristo_desc/aristo_types_backend
   import AristoBackendRef

+# Not auto-exporting backend
 export
-  # Not auto-exporting backend
   aristo_constants, aristo_error, aristo_types_identifiers,
   aristo_types_structural
@@ -44,9 +44,16 @@ type
     txUid*: uint                       ## Unique ID among transactions
     level*: int                        ## Stack index for this transaction

+  AristoDudesRef* = ref object
+    case rwOk*: bool
+    of true:
+      roDudes*: HashSet[AristoDbRef]   ## Read-only peers
+    else:
+      rwDb*: AristoDbRef               ## Link to writable descriptor
+
   AristoDbRef* = ref AristoDbObj
   AristoDbObj* = object
-    ## Set of database layers, supporting transaction frames
+    ## Three tier database object supporting distributed instances.
     top*: AristoLayerRef               ## Database working layer, mutable
     stack*: seq[AristoLayerRef]        ## Stashed immutable parent layers
     roFilter*: AristoFilterRef         ## Apply read filter (locks writing)
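`AristoDudesRef` is a case object, so exactly one of the two branches exists
per instance: a writer keeps the set of read-only peers, and a reader points
back at the writer. An illustrative sketch (the descriptor values are
placeholders)::

    import std/sets

    let writer = AristoDbRef()
    let rwSide = AristoDudesRef(rwOk: true,
                                roDudes: initHashSet[AristoDbRef]())
    let roSide = AristoDudesRef(rwOk: false, rwDb: writer)
    doAssert roSide.rwDb == writer   # ro peers refer back to the writer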
@@ -54,10 +61,14 @@ type

     txRef*: AristoTxRef                ## Latest active transaction
     txUidGen*: uint                    ## Tx-relative unique number generator
+    dudes*: AristoDudesRef             ## Related DB descriptors

     # Debugging data below, might go away in future
     xMap*: Table[HashLabel,VertexID]   ## For pretty printing, extends `pAmk`

+  AristoDbAction* = proc(db: AristoDbRef) {.gcsafe, raises: [CatchableError].}
+    ## Generic call back function/closure.
+
 # ------------------------------------------------------------------------------
 # Public helpers
 # ------------------------------------------------------------------------------
@@ -98,9 +109,13 @@ func isValid*(vid: VertexID): bool =
 # Public functions, miscellaneous
 # ------------------------------------------------------------------------------

+# Hash set helper
+func hash*(db: AristoDbRef): Hash =
+  ## Table/KeyedQueue/HashSet mixin
+  cast[pointer](db).hash
+
 # Note that the below `init()` function cannot go into
 # `aristo_types_identifiers` as this would result in a circular import.

 func init*(key: var HashKey; data: openArray[byte]): bool =
   ## Import argument `data` into `key` which must have length either `32`, or
   ## `0`. The latter case is equivalent to an all zero byte array of size `32`.
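The pointer cast gives descriptors identity-based hashing, which is what
`HashSet[AristoDbRef]` (the `roDudes` peer set) relies on; hashing the layered
tables structurally would be slow and would conflate distinct descriptors with
equal content. A short sketch::

    import std/sets

    var peers: HashSet[AristoDbRef]
    let db = AristoDbRef()
    peers.incl db
    doAssert db in peers                  # same reference, same hash
    doAssert AristoDbRef() notin peers    # fresh reference, new identity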
@@ -92,7 +92,8 @@
     HashifyCannotComplete
     HashifyCannotHashRoot
     HashifyExistingHashMismatch
-    HashifyLeafToRootAllFailed
+    HashifyDownVtxlevelExceeded
+    HashifyDownVtxLeafUnexpected
     HashifyRootHashMismatch
     HashifyRootVidMismatch
     HashifyVidCircularDependence
@@ -131,6 +132,7 @@ type
     CheckBeKeyMismatch
     CheckBeGarbledVGen

+    CheckBeCacheIsDirty
     CheckBeCacheKeyMissing
     CheckBeCacheKeyNonEmpty
     CheckBeCacheVidUnsynced
@@ -167,9 +169,12 @@ type
     DelVidStaleVtx

     # Functions from `aristo_filter.nim`
+    FilRoBackendOrMissing
     FilStateRootMissing
     FilStateRootMismatch
     FilPrettyPointlessLayer
+    FilDudeFilterUpdateError
+    FilNotReadOnlyDude

     # Get functions from `aristo_get.nim`
     GetLeafNotFound
@@ -192,8 +197,9 @@ type

     # Transaction wrappers
     TxArgStaleTx
-    TxBackendMissing
+    TxRoBackendOrMissing
     TxNoPendingTx
+    TxPendingTx
     TxNotTopTx
     TxStackGarbled
     TxStackUnderflow
@@ -250,6 +250,16 @@ proc dup*(layer: AristoLayerRef): AristoLayerRef =
   for (k,v) in layer.sTab.pairs:
     result.sTab[k] = v.dup

+proc dup*(filter: AristoFilterRef): AristoFilterRef =
+  ## Duplicate filter.
+  result = AristoFilterRef(
+    src:  filter.src,
+    kMap: filter.kMap,
+    vGen: filter.vGen,
+    trg:  filter.trg)
+  for (k,v) in filter.sTab.pairs:
+    result.sTab[k] = v.dup
+
 proc to*(node: NodeRef; T: type VertexRef): T =
   ## Extract a copy of the `VertexRef` part from a `NodeRef`.
   node.VertexRef.dup
@@ -13,8 +13,9 @@
 ##

 import
-  std/[options, sequtils, tables],
+  std/[options, sequtils, sets, tables],
   results,
+  ./aristo_desc/aristo_types_backend,
   "."/[aristo_desc, aristo_get, aristo_vid]

 type
@@ -26,16 +27,6 @@ type
 # Private helpers
 # ------------------------------------------------------------------------------

-proc getBeStateRoot(
-    db: AristoDbRef;
-      ): Result[HashKey,AristoError] =
-  let rc = db.getKeyBE VertexID(1)
-  if rc.isOk:
-    return ok(rc.value)
-  if rc.error == GetKeyNotFound:
-    return ok(VOID_HASH_KEY)
-  err(rc.error)
-
 proc getLayerStateRoots(
     db: AristoDbRef;
     layer: AristoLayerRef;
|
@ -44,26 +35,115 @@ proc getLayerStateRoots(
|
||||||
## Get the Merkle hash key for target state root to arrive at after this
|
## Get the Merkle hash key for target state root to arrive at after this
|
||||||
## reverse filter was applied.
|
## reverse filter was applied.
|
||||||
var spr: StateRootPair
|
var spr: StateRootPair
|
||||||
block:
|
|
||||||
let rc = db.getBeStateRoot()
|
spr.be = block:
|
||||||
if rc.isErr:
|
let rc = db.getKeyBE VertexID(1)
|
||||||
|
if rc.isOk:
|
||||||
|
rc.value
|
||||||
|
elif rc.error == GetKeyNotFound:
|
||||||
|
VOID_HASH_KEY
|
||||||
|
else:
|
||||||
return err(rc.error)
|
return err(rc.error)
|
||||||
spr.be = rc.value
|
|
||||||
block:
|
block:
|
||||||
spr.fg = layer.kMap.getOrVoid(VertexID 1).key
|
spr.fg = layer.kMap.getOrVoid(VertexID 1).key
|
||||||
if spr.fg.isValid:
|
if spr.fg.isValid:
|
||||||
return ok(spr)
|
return ok(spr)
|
||||||
|
|
||||||
if chunkedMpt:
|
if chunkedMpt:
|
||||||
let vid = layer.pAmk.getOrVoid HashLabel(root: VertexID(1), key: spr.be)
|
let vid = layer.pAmk.getOrVoid HashLabel(root: VertexID(1), key: spr.be)
|
||||||
if vid == VertexID(1):
|
if vid == VertexID(1):
|
||||||
spr.fg = spr.be
|
spr.fg = spr.be
|
||||||
return ok(spr)
|
return ok(spr)
|
||||||
|
|
||||||
if layer.sTab.len == 0 and
|
if layer.sTab.len == 0 and
|
||||||
layer.kMap.len == 0 and
|
layer.kMap.len == 0 and
|
||||||
layer.pAmk.len == 0:
|
layer.pAmk.len == 0:
|
||||||
return err(FilPrettyPointlessLayer)
|
return err(FilPrettyPointlessLayer)
|
||||||
|
|
||||||
err(FilStateRootMismatch)
|
err(FilStateRootMismatch)
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# Private functions
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
proc merge(
|
||||||
|
db: AristoDbRef;
|
||||||
|
upper: AristoFilterRef; # Src filter, `nil` is ok
|
||||||
|
lower: AristoFilterRef; # Trg filter, `nil` is ok
|
||||||
|
beStateRoot: HashKey; # Merkle hash key
|
||||||
|
): Result[AristoFilterRef,(VertexID,AristoError)] =
|
||||||
|
## Merge argument `upper` into the `lower` filter instance.
|
||||||
|
##
|
||||||
|
## Comparing before and after merge
|
||||||
|
## ::
|
||||||
|
## current | merged
|
||||||
|
## ----------------------------+--------------------------------
|
||||||
|
## trg2 --upper-- (src2==trg1) |
|
||||||
|
## | trg2 --newFilter-- (src1==trg0)
|
||||||
|
## trg1 --lower-- (src1==trg0) |
|
||||||
|
## |
|
||||||
|
## trg0 --beStateRoot | trg0 --beStateRoot
|
||||||
|
## |
|
||||||
|
##
|
||||||
|
# Degenerate case: `upper` is void
|
||||||
|
if lower.isNil or lower.vGen.isNone:
|
||||||
|
if upper.isNil or upper.vGen.isNone:
|
||||||
|
# Even more degenerate case when both filters are void
|
||||||
|
return ok AristoFilterRef(
|
||||||
|
src: beStateRoot,
|
||||||
|
trg: beStateRoot,
|
||||||
|
vGen: none(seq[VertexID]))
|
||||||
|
if upper.src != beStateRoot:
|
||||||
|
return err((VertexID(1),FilStateRootMismatch))
|
||||||
|
return ok(upper)
|
||||||
|
|
||||||
|
# Degenerate case: `upper` is non-trivial and `lower` is void
|
||||||
|
if upper.isNil or upper.vGen.isNone:
|
||||||
|
if lower.src != beStateRoot:
|
||||||
|
return err((VertexID(0), FilStateRootMismatch))
|
||||||
|
return ok(lower)
|
||||||
|
|
||||||
|
# Verify stackability
|
||||||
|
if upper.src != lower.trg or
|
||||||
|
lower.src != beStateRoot:
|
||||||
|
return err((VertexID(0), FilStateRootMismatch))
|
||||||
|
|
||||||
|
# There is no need to deep copy table vertices as they will not be modified.
|
||||||
|
let newFilter = AristoFilterRef(
|
||||||
|
src: lower.src,
|
||||||
|
sTab: lower.sTab,
|
||||||
|
kMap: lower.kMap,
|
||||||
|
vGen: upper.vGen,
|
||||||
|
trg: upper.trg)
|
||||||
|
|
||||||
|
for (vid,vtx) in upper.sTab.pairs:
|
||||||
|
if vtx.isValid or not newFilter.sTab.hasKey vid:
|
||||||
|
newFilter.sTab[vid] = vtx
|
||||||
|
elif newFilter.sTab.getOrVoid(vid).isValid:
|
||||||
|
let rc = db.getVtxUBE vid
|
||||||
|
if rc.isOk:
|
||||||
|
newFilter.sTab[vid] = vtx # VertexRef(nil)
|
||||||
|
elif rc.error == GetVtxNotFound:
|
||||||
|
newFilter.sTab.del vid
|
||||||
|
else:
|
||||||
|
return err((vid,rc.error))
|
||||||
|
|
||||||
|
for (vid,key) in upper.kMap.pairs:
|
||||||
|
if key.isValid or not newFilter.kMap.hasKey vid:
|
||||||
|
newFilter.kMap[vid] = key
|
||||||
|
elif newFilter.kMap.getOrVoid(vid).isValid:
|
||||||
|
let rc = db.getKeyUBE vid
|
||||||
|
if rc.isOk:
|
||||||
|
newFilter.kMap[vid] = key # VOID_HASH_KEY
|
||||||
|
elif rc.error == GetKeyNotFound:
|
||||||
|
newFilter.kMap.del vid
|
||||||
|
else:
|
||||||
|
return err((vid,rc.error))
|
||||||
|
|
||||||
|
ok newFilter
|
||||||
|
|
||||||
|
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
# Public helpers
|
# Public helpers
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
|
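The invariant to keep in mind from the private `merge()` above: filters chain
by state roots, so a successful merge spans from the lower filter's source to
the upper filter's target. A hedged post-condition sketch, with `upper`,
`lower` and `beStateRoot` assumed to satisfy the stackability checks::

    let rc = db.merge(upper, lower, beStateRoot)
    if rc.isOk:
      doAssert rc.value.src == lower.src
      doAssert rc.value.trg == upper.trg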
@@ -122,6 +202,51 @@ proc fwdFilter*(
     vGen: some(layer.vGen.vidReorg), # Compact recycled IDs
     trg:  trgRoot)

+
+proc revFilter*(
+    db: AristoDbRef;
+    filter: AristoFilterRef;
+      ): Result[AristoFilterRef,(VertexID,AristoError)] =
+  ## Assemble reverse filter for the `filter` argument, i.e. changes to the
+  ## backend that reverse the effect of applying this read-only filter.
+  ##
+  ## This read-only filter is calculated against the current unfiltered
+  ## backend (excluding optionally installed read-only filter.)
+  ##
+  # Register MPT state roots for reverting back
+  let rev = AristoFilterRef(
+    src: filter.trg,
+    trg: filter.src)
+
+  # Get vid generator state on backend
+  block:
+    let rc = db.getIdgUBE()
+    if rc.isErr:
+      return err((VertexID(0), rc.error))
+    rev.vGen = some rc.value
+
+  # Calculate reverse changes for the `sTab[]` structural table
+  for vid in filter.sTab.keys:
+    let rc = db.getVtxUBE vid
+    if rc.isOk:
+      rev.sTab[vid] = rc.value
+    elif rc.error == GetVtxNotFound:
+      rev.sTab[vid] = VertexRef(nil)
+    else:
+      return err((vid,rc.error))
+
+  # Calculate reverse changes for the `kMap` sequence.
+  for vid in filter.kMap.keys:
+    let rc = db.getKeyUBE vid
+    if rc.isOk:
+      rev.kMap[vid] = rc.value
+    elif rc.error == GetKeyNotFound:
+      rev.kMap[vid] = VOID_HASH_KEY
+    else:
+      return err((vid,rc.error))
+
+  ok(rev)
+
 # ------------------------------------------------------------------------------
 # Public functions, apply/install filters
 # ------------------------------------------------------------------------------
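`revFilter()` is the mirror construction: its source and target roots are
swapped relative to the argument, and its tables restore whatever the
unfiltered backend holds for the touched vertex IDs. A hedged sketch of the
symmetry, with `fwd` assumed to be a forward filter from `fwdFilter()`::

    # `fwd` is an assumed AristoFilterRef produced by `fwdFilter()` above.
    let rev = db.revFilter(fwd).value
    doAssert rev.src == fwd.trg and rev.trg == fwd.src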
@@ -130,81 +255,148 @@ proc merge*(
     db: AristoDbRef;
     filter: AristoFilterRef;
       ): Result[void,(VertexID,AristoError)] =
-  ## Merge argument `filter` to the filter layer.
-  ##
-  ## Comparing before and after merge
-  ## ::
-  ##   current                            | merged
-  ##   -----------------------------------+--------------------------------
-  ##   trg2 --filter-- (src2==trg1)       |
-  ##                                      | trg2 --newFilter-- (src1==trg0)
-  ##   trg1 --db.roFilter-- (src1==trg0)  |
-  ##                                      |
-  ##   trg0 --db.backend                  | trg0 --db.backend
-  ##                                      |
-  let beRoot = block:
-    let rc = db.getBeStateRoot()
+  ## Merge the argument `filter` into the read-only filter layer. Note that
+  ## this function has no control of the filter source. Having merged the
+  ## argument `filter`, all the `top` and `stack` layers should be cleared.
+  let ubeRootKey = block:
+    let rc = db.getKeyUBE VertexID(1)
+    if rc.isOk:
+      rc.value
+    elif rc.error == GetKeyNotFound:
+      VOID_HASH_KEY
+    else:
+      return err((VertexID(1),rc.error))
+
+  db.roFilter = block:
+    let rc = db.merge(filter, db.roFilter, ubeRootKey)
     if rc.isErr:
-      return err((VertexID(1),FilStateRootMissing))
+      return err(rc.error)
     rc.value

-  if filter.vGen.isNone:
-    # Blind argument filter
-    if db.roFilter.isNil:
-      # Force read-only system
-      db.roFilter = AristoFilterRef(
-        src:  beRoot,
-        trg:  beRoot,
-        vGen: none(seq[VertexID]))
-    return ok()
-
-  # Simple case: no read-only filter yet
-  if db.roFilter.isNil or db.roFilter.vGen.isNone:
-    if filter.src != beRoot:
-      return err((VertexID(1),FilStateRootMismatch))
-    db.roFilter = filter
-    return ok()
-
-  # Verify merge stackability into existing read-only filter
-  if filter.src != db.roFilter.trg:
-    return err((VertexID(1),FilStateRootMismatch))
-
-  # Merge `filter` into `roFilter` as `newFilter`. There is no need to deep
-  # copy table vertices as they will not be modified.
-  let newFilter = AristoFilterRef(
-    src:  db.roFilter.src,
-    sTab: db.roFilter.sTab,
-    kMap: db.roFilter.kMap,
-    vGen: filter.vGen,
-    trg:  filter.trg)
-
-  for (vid,vtx) in filter.sTab.pairs:
-    if vtx.isValid or not newFilter.sTab.hasKey vid:
-      newFilter.sTab[vid] = vtx
-    elif newFilter.sTab.getOrVoid(vid).isValid:
-      let rc = db.getVtxUBE vid
-      if rc.isOk:
-        newFilter.sTab[vid] = vtx # VertexRef(nil)
-      elif rc.error == GetVtxNotFound:
-        newFilter.sTab.del vid
-      else:
-        return err((vid,rc.error))
-
-  for (vid,key) in filter.kMap.pairs:
-    if key.isValid or not newFilter.kMap.hasKey vid:
-      newFilter.kMap[vid] = key
-    elif newFilter.kMap.getOrVoid(vid).isValid:
-      let rc = db.getKeyUBE vid
-      if rc.isOk:
-        newFilter.kMap[vid] = key # VOID_HASH_KEY
-      elif rc.error == GetKeyNotFound:
-        newFilter.kMap.del vid
-      else:
-        return err((vid,rc.error))
-
-  db.roFilter = newFilter
   ok()
+
+
+proc canResolveBE*(db: AristoDbRef): bool =
+  ## Check whether the read-only filter can be merged into the backend
+  if not db.backend.isNil:
+    if db.dudes.isNil or db.dudes.rwOk:
+      return true
+
+
+proc resolveBE*(db: AristoDbRef): Result[void,(VertexID,AristoError)] =
+  ## Resolve the backend filter into the physical backend. This requires that
+  ## the argument `db` descriptor has read-write permission for the backend
+  ## (see also the below function `ackqRwMode()`.)
+  ##
+  ## For any associated descriptors working on the same backend, their backend
+  ## filters will be updated so that the change of the backend DB remains
+  ## unnoticed.
+  if not db.canResolveBE():
+    return err((VertexID(1),FilRoBackendOrMissing))
+
+  # Blind or missing filter
+  if db.roFilter.isNil:
+    return ok()
+  if db.roFilter.vGen.isNone:
+    db.roFilter = AristoFilterRef(nil)
+    return ok()
+
+  let ubeRootKey = block:
+    let rc = db.getKeyUBE VertexID(1)
+    if rc.isOk:
+      rc.value
+    elif rc.error == GetKeyNotFound:
+      VOID_HASH_KEY
+    else:
+      return err((VertexID(1),rc.error))
+
+  # Filters rollback helper
+  var roFilters: seq[(AristoDbRef,AristoFilterRef)]
+  proc rollback() =
+    for (d,f) in roFilters:
+      d.roFilter = f
+
+  # Update dudes
+  if not db.dudes.isNil:
+    # Calculate reverse filter from current filter
+    let rev = block:
+      let rc = db.revFilter db.roFilter
+      if rc.isErr:
+        return err(rc.error)
+      rc.value
+
+    # Update distributed filters. Note that the physical backend database
+    # has not been updated, yet. So the new root key for the backend will
+    # be `db.roFilter.trg`.
+    for dude in db.dudes.roDudes.items:
+      let rc = db.merge(dude.roFilter, rev, db.roFilter.trg)
+      if rc.isErr:
+        rollback()
+        return err(rc.error)
+      roFilters.add (dude, dude.roFilter)
+      dude.roFilter = rc.value
+
+  # Save structural and other table entries
+  let
+    be = db.backend
+    txFrame = be.putBegFn()
+  be.putVtxFn(txFrame, db.roFilter.sTab.pairs.toSeq)
+  be.putKeyFn(txFrame, db.roFilter.kMap.pairs.toSeq)
+  be.putIdgFn(txFrame, db.roFilter.vGen.unsafeGet)
+  let w = be.putEndFn txFrame
+  if w != AristoError(0):
+    rollback()
+    return err((VertexID(0),w))
+
+  ok()
+
+
+proc ackqRwMode*(db: AristoDbRef): Result[void,AristoError] =
+  ## Re-focus the `db` argument descriptor to backend read-write permission.
+  if not db.dudes.isNil and not db.dudes.rwOk:
+    # Steal dudes list, make the rw-parent a read-only dude
+    let parent = db.dudes.rwDb
+    db.dudes = parent.dudes
+    parent.dudes = AristoDudesRef(rwOk: false, rwDb: db)
+
+    # Exclude self
+    db.dudes.roDudes.excl db
+
+    # Update dudes
+    for w in db.dudes.roDudes:
+      # Let all other dudes refer to this one
+      w.dudes.rwDb = db
+
+    # Update dudes list (parent was already updated)
+    db.dudes.roDudes.incl parent
+    return ok()
+
+  err(FilNotReadOnlyDude)
+
+
+proc dispose*(db: AristoDbRef): Result[void,AristoError] =
+  ## Terminate usage of the `db` argument descriptor with backend read-only
+  ## permission.
+  ##
+  ## This type of descriptor should always be terminated after use. Otherwise
+  ## it would always be updated when running `resolveBE()` which costs
+  ## unnecessary computing resources. Also, the read-only backend filter
+  ## copies might grow big when it could be avoided.
+  if not db.isNil and
+     not db.dudes.isNil and
+     not db.dudes.rwOk:
+    # Unlink argument `db`
+    db.dudes.rwDb.dudes.roDudes.excl db
+
+    # Unlink more so it would not do harm if used wrongly
+    db.stack.setLen(0)
+    db.backend = AristoBackendRef(nil)
+    db.txRef = AristoTxRef(nil)
+    db.dudes = AristoDudesRef(nil)
+    return ok()
+
+  err(FilNotReadOnlyDude)

 # ------------------------------------------------------------------------------
 # End
 # ------------------------------------------------------------------------------
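Pulling these together: only the read-write member of a dude group may resolve
into the physical backend, so a read-only peer first has to acquire that role.
A hedged sketch, with `clone` assumed to come from `copyCat()`::

    if not clone.canResolveBE():
      doAssert clone.ackqRwMode().isOk   # swap the rw role onto `clone`
    doAssert clone.resolveBE().isOk      # peers receive compensating filters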
@@ -58,6 +58,16 @@ type
   BackVidTab =
     Table[VertexID,BackVidValRef]

+  BackWVtxRef = ref object
+    w: BackVidValRef
+    vtx: VertexRef
+
+  BackWVtxTab =
+    Table[VertexID,BackWVtxRef]
+
+const
+  SubTreeSearchDepthMax = 64
+
 logScope:
   topics = "aristo-hashify"
@@ -71,9 +81,15 @@ template logTxt(info: static[string]): static[string] =
 func getOrVoid(tab: BackVidTab; vid: VertexID): BackVidValRef =
   tab.getOrDefault(vid, BackVidValRef(nil))

+func getOrVoid(tab: BackWVtxTab; vid: VertexID): BackWVtxRef =
+  tab.getOrDefault(vid, BackWVtxRef(nil))
+
 func isValid(brv: BackVidValRef): bool =
   brv != BackVidValRef(nil)

+func isValid(brv: BackWVtxRef): bool =
+  brv != BackWVtxRef(nil)
+
 # ------------------------------------------------------------------------------
 # Private functions
 # ------------------------------------------------------------------------------
@@ -209,6 +225,119 @@ proc deletedLeafHasher(

   ok()

+# ------------------
+
+proc resolveStateRoots(
+    db: AristoDbRef;                   # Database, top layer
+    uVids: BackVidTab;                 # Unresolved vertex IDs
+      ): Result[void,(VertexID,AristoError)] =
+  ## Resolve unresolved nodes. There might be a sub-tree on the backend which
+  ## blocks resolving the current structure. So search the `uVids` argument
+  ## list for missing vertices and resolve them.
+  #
+  # Update out-of-path hashes, i.e. fill gaps caused by branching out from
+  # `downMost` table vertices.
+  #
+  # Example
+  # ::
+  #     $1                            ^
+  #       \                           |
+  #        $7 -- $6 -- leaf $8        | on top layer,
+  #          \     `--- leaf $9       | $5..$9 were inserted,
+  #           $5                      | $1 was redefined
+  #             \                     v
+  #              \
+  #               \                   ^
+  #                $4 -- leaf $2      | from
+  #                  `--- leaf $3     | backend (BE)
+  #                                   v
+  #    backLink[] = {$7}
+  #    downMost[] = {$7}
+  #    top.kMap[] = {£1, £6, £8, £9}
+  #    BE.kMap[]  = {£1, £2, £3, £4}
+  #
+  # So `$5` (needed for `$7`) cannot be resolved because it is neither on
+  # the path `($1..$8)`, nor is it on `($1..$9)`.
+  #
+  var follow: BackWVtxTab
+
+  proc wVtxRef(db: AristoDbRef; root, vid, toVid: VertexID): BackWVtxRef =
+    let vtx = db.getVtx vid
+    if vtx.isValid:
+      return BackWVtxRef(
+        vtx: vtx,
+        w: BackVidValRef(
+          root:  root,
+          onBe:  not db.top.sTab.getOrVoid(vid).isValid,
+          toVid: toVid))
+
+  # Init `follow` table by unresolved `Branch` leaves from `uVids`
+  for (uVid,uVal) in uVids.pairs:
+    let uVtx = db.getVtx uVid
+    if uVtx.isValid and uVtx.vType == Branch:
+      var didSomething = false
+      for vid in uVtx.bVid:
+        if vid.isValid and not db.getKey(vid).isValid:
+          let w = db.wVtxRef(root=uVal.root, vid=vid, toVid=uVid)
+          if not w.isNil:
+            follow[vid] = w
+            didSomething = true
+      # Add state root to be resolved, as well
+      if didSomething and not follow.hasKey uVal.root:
+        let w = db.wVtxRef(root=uVal.root, vid=uVal.root, toVid=uVal.root)
+        if not w.isNil:
+          follow[uVal.root] = w
+
+  # Update and re-collect into `follow` table
+  var level = 0
+  while 0 < follow.len:
+    var
+      changes = false
+      redo: BackWVtxTab
+    for (fVid,fVal) in follow.pairs:
+      # Resolve or keep for later
+      let rc = fVal.vtx.toNode db
+      if rc.isOk:
+        # Update Merkle hash
+        let
+          key = rc.value.to(HashKey)
+          rx = db.updateHashKey(fVal.w.root, fVid, key, fVal.w.onBe)
+        if rx.isErr:
+          return err((fVid, rx.error))
+        changes = true
+      else:
+        # Cannot complete with this vertex, so dig deeper and do it later
+        redo[fVid] = fVal
+
+        case fVal.vtx.vType:
+        of Branch:
+          for vid in fVal.vtx.bVid:
+            if vid.isValid and not db.getKey(vid).isValid:
+              let w = db.wVtxRef(root=fVal.w.root, vid=vid, toVid=fVid)
+              if not w.isNil:
+                changes = true
+                redo[vid] = w
+        of Extension:
+          let vid = fVal.vtx.eVid
+          if vid.isValid and not db.getKey(vid).isValid:
+            let w = db.wVtxRef(root=fVal.w.root, vid=vid, toVid=fVid)
+            if not w.isNil:
+              changes = true
+              redo[vid] = w
+        of Leaf:
+          # Should have been hashed earlier
+          return err((fVid,HashifyDownVtxLeafUnexpected))
+
+    # Beware of loops
+    if not changes or SubTreeSearchDepthMax < level:
+      return err((VertexID(0),HashifyDownVtxlevelExceeded))
+
+    # Restart with a new instance of `follow`
+    redo.swap follow
+    level.inc
+
+  ok()
+
 # ------------------------------------------------------------------------------
 # Public functions
 # ------------------------------------------------------------------------------
@@ -244,7 +373,7 @@ proc hashify*(
   for (lky,vid) in db.top.lTab.pairs:
     let hike = lky.hikeUp(db)

-    # There might be deleted entries on the leaf table. If this is tha case,
+    # There might be deleted entries on the leaf table. If this is the case,
     # the Merkle hashes for the vertices in the `hike` can all be compiled.
     if not vid.isValid:
       let rc = db.deletedLeafHasher hike
@@ -268,12 +397,17 @@
     # Backtrack and register remaining nodes. Note that in case *n == 0*,
     # the root vertex has not been fully resolved yet.
     #
-    # hike.legs: (leg[0], leg[1], .., leg[n-1], leg[n], ..)
-    #              |       |            |         |
-    #              | <---- | <---- .... | <----   |
-    #              |                    |
-    #              |     backLink[]     | downMost
+    #  .. unresolved hash keys | all set here ..
+    #                          |
+    #                          |
+    # hike.legs: (leg[0], leg[1], ..leg[n-1], leg[n], ..)
+    #              |       |         |          |
+    #              | <---- | <----   | <------- |
+    #              |                 |
+    #              |   backLink[]    | downMost[]
     #
+    if n+1 < hike.legs.len:
+      downMost.del hike.legs[n+1].wp.vid
     downMost[hike.legs[n].wp.vid] = BackVidValRef(
       root: hike.root,
       onBe: hike.legs[n].backend,
@@ -289,7 +423,9 @@ proc hashify*(
   # At least one full path leaf..root should have succeeded with labelling
   # for each root.
   if completed.len < roots.len:
-    return err((VertexID(0),HashifyLeafToRootAllFailed))
+    let rc = db.resolveStateRoots backLink
+    if rc.isErr:
+      return err(rc.error)

   # Update remaining hashes
   while 0 < downMost.len:
@@ -14,6 +14,7 @@
 {.push raises: [].}

 import
+  std/sets,
   results,
   ../aristo_desc,
   ../aristo_desc/aristo_types_backend,
@@ -57,12 +58,22 @@ proc finish*(db: AristoDbRef; flush = false) =
   ## depending on the type of backend (e.g. the `BackendMemory` backend will
   ## always flush on close.)
   ##
+  ## In case of distributed descriptors accessing the same backend, all
+  ## distributed descriptors will be destroyed.
+  ##
   ## This destructor may be used on already *destructed* descriptors.
-  if not db.backend.isNil:
-    db.backend.closeFn flush
-    db.backend = AristoBackendRef(nil)
-  db.top = AristoLayerRef(nil)
-  db.stack.setLen(0)
+  ##
+  if not db.isNil:
+    if not db.backend.isNil:
+      db.backend.closeFn flush
+
+    if db.dudes.isNil:
+      db[] = AristoDbObj()
+    else:
+      let lebo = if db.dudes.rwOk: db else: db.dudes.rwDb
+      for w in lebo.dudes.roDudes:
+        w[] = AristoDbObj()
+      lebo[] = AristoDbObj()

 # -----------------
@@ -14,9 +14,9 @@
 {.push raises: [].}

 import
-  std/[options, sequtils, tables],
+  std/[options, sets],
   results,
-  "."/[aristo_desc, aristo_filter, aristo_hashify]
+  "."/[aristo_desc, aristo_filter, aristo_get, aristo_hashify]

 func isTop*(tx: AristoTxRef): bool
@@ -32,16 +32,24 @@ func getDbDescFromTopTx(tx: AristoTxRef): Result[AristoDbRef,AristoError] =
     return err(TxStackUnderflow)
   ok db

-# ------------------------------------------------------------------------------
-# Private functions
-# ------------------------------------------------------------------------------
-
 proc getTxUid(db: AristoDbRef): uint =
   if db.txUidGen == high(uint):
     db.txUidGen = 0
   db.txUidGen.inc
   db.txUidGen

+proc linkClone(db: AristoDbRef; clone: AristoDbRef) =
+  ## Link clone to parent
+  clone.dudes = AristoDudesRef(
+    rwOk: false,
+    rwDb: db)
+  if db.dudes.isNil:
+    db.dudes = AristoDudesRef(
+      rwOk:    true,
+      roDudes: @[clone].toHashSet)
+  else:
+    db.dudes.roDudes.incl clone
+
 # ------------------------------------------------------------------------------
 # Public functions, getters
 # ------------------------------------------------------------------------------
@@ -75,19 +83,107 @@ func to*(tx: AristoTxRef; T: type[AristoDbRef]): T =
   ## Getter, retrieves the parent database descriptor from argument `tx`
   tx.db

-proc rebase*(
-    tx: AristoTxRef;                   # Some transaction on database
-      ): Result[void,AristoError] =
-  ## Revert transaction stack to an earlier point in time.
-  if not tx.isTop():
-    let
-      db = tx.db
-      inx = tx.level
-    if db.stack.len <= inx or db.stack[inx].txUid != tx.txUid:
-      return err(TxArgStaleTx)
-    # Roll back to some earlier layer.
-    db.top = db.stack[inx]
-    db.stack.setLen(inx)
+proc copyCat*(tx: AristoTxRef): Result[AristoDbRef,AristoError] =
+  ## Clone a transaction into a new DB descriptor. The new descriptor is linked
+  ## to the transaction parent and will be updated with functions like
+  ## `aristo_filter.resolveBE()` or `aristo_filter.ackqRwMode()`. The new
+  ## descriptor is fully functional apart from the fact that the physical
+  ## backend cannot be updated (but see `aristo_filter.ackqRwMode()`.)
+  ##
+  ## The new DB descriptor contains a copy of the argument transaction `tx` as
+  ## top layer of level 1 (i.e. this is the only transaction.) Rolling back
+  ## will end up at the backend layer (incl. backend filter.)
+  ##
+  ## Use `aristo_filter.dispose()` to clean up the new DB descriptor.
+  ##
+  let db = tx.db
+
+  # Provide new top layer
+  var topLayer: AristoLayerRef
+  if db.txRef == tx:
+    topLayer = db.top.dup
+  elif tx.level < db.stack.len:
+    topLayer = db.stack[tx.level].dup
+  else:
+    return err(TxArgStaleTx)
+  if topLayer.txUid != tx.txUid:
+    return err(TxArgStaleTx)
+  topLayer.txUid = 1
+
+  # Empty stack
+  let stackLayer = block:
+    let rc = db.getIdgBE()
+    if rc.isOk:
+      AristoLayerRef(vGen: rc.value)
+    elif rc.error == GetIdgNotFound:
+      AristoLayerRef()
+    else:
+      return err(rc.error)
+
+  # Set up clone associated to `db`
+  let txClone = AristoDbRef(
+    top:      topLayer,       # is a deep copy
+    stack:    @[stackLayer],
+    roFilter: db.roFilter,    # no need to copy contents (done when updated)
+    backend:  db.backend,
+    txUidGen: 1,
+    dudes: AristoDudesRef(
+      rwOk: false,
+      rwDb: db))
+
+  # Link clone to parent
+  db.linkClone txClone
+
+  # Install transaction similar to `tx` on clone
+  txClone.txRef = AristoTxRef(
+    db:    txClone,
+    txUid: 1,
+    level: 1)
+
+  ok(txClone)
+
+proc copyCat*(db: AristoDbRef): Result[AristoDbRef,AristoError] =
+  ## Variant of `copyCat()`. If there is a transaction pending, then the
+  ## function returns `db.txTop.value.copyCat()`. Otherwise it returns a
+  ## clone of the top layer.
+  ##
+  ## Use `aristo_filter.dispose()` to clean up the copy cat descriptor.
+  ##
+  if db.txRef.isNil:
+    let dbClone = AristoDbRef(
+      top:      db.top.dup,   # is a deep copy
+      roFilter: db.roFilter,  # no need to copy contents (done when updated)
+      backend:  db.backend)
+
+    # Link clone to parent
+    db.linkClone dbClone
+    return ok(dbClone)
+
+  db.txRef.copyCat()
+
+
+proc exec*(
+    tx: AristoTxRef;
+    action: AristoDbAction;
+      ): Result[void,AristoError]
+      {.gcsafe, raises: [CatchableError].} =
+  ## Execute function argument `action()` on a temporary `tx.copyCat()`
+  ## transaction database. After return, the temporary database gets
+  ## destroyed.
+  ##
+  let db = block:
+    let rc = tx.copyCat()
+    if rc.isErr:
+      return err(rc.error)
+    rc.value
+
+  db.action()
+
+  block:
+    let rc = db.dispose()
+    if rc.isErr:
+      return err(rc.error)
   ok()

 # ------------------------------------------------------------------------------
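A hedged usage sketch for `exec()`: the action sees a consistent snapshot of
the transaction without blocking it, and the throwaway clone is disposed on
return::

    let rc = tx.exec(proc(db: AristoDbRef) =
      # read-only inspection of the cloned transaction state
      echo db.pp())
    doAssert rc.isOk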
@@ -158,7 +254,7 @@ proc commit*(
       return err((VertexID(0),rc.error))
     rc.value

-  if not dontHashify:
+  if db.top.dirty and not dontHashify:
     let rc = db.hashify()
     if rc.isErr:
       return err(rc.error)
@@ -196,8 +292,8 @@ proc collapse*(
   if not commit:
     db.stack[0].swap db.top

-  if not dontHashify:
-    var rc = db.hashify()
+  if db.top.dirty and not dontHashify:
+    let rc = db.hashify()
     if rc.isErr:
       if not commit:
         db.stack[0].swap db.top # restore
@@ -218,8 +314,8 @@ proc stow*(
     chunkedMpt = false;                # Partial data (e.g. from `snap`)
       ): Result[void,(VertexID,AristoError)] =
   ## If there is no backend while the `persistent` argument is set `true`,
-  ## the function returns immediately with an error.The same happens if the
-  ## backend is locked while `persistent` is set (e.g. by an `exec()` call.)
+  ## the function returns immediately with an error. The same happens if
+  ## there is a pending transaction.
   ##
   ## The `dontHashify` is treated as described for `commit()`.
   ##
@@ -234,9 +330,17 @@
   ## If the argument `persistent` is set `true`, all the staged data are merged
   ## into the physical backend database and the staged data area is cleared.
   ##
-  let be = db.backend
-  if be.isNil and persistent:
-    return err((VertexID(0),TxBackendMissing))
+  if not db.txRef.isNil:
+    return err((VertexID(0),TxPendingTx))
+  if 0 < db.stack.len:
+    return err((VertexID(0),TxStackGarbled))
+  if persistent and not db.canResolveBE():
+    return err((VertexID(0),TxRoBackendOrMissing))
+
+  if db.top.dirty and not dontHashify:
+    let rc = db.hashify()
+    if rc.isErr:
+      return err(rc.error)

   let fwd = block:
     let rc = db.fwdFilter(db.top, chunkedMpt)
@@ -246,22 +350,17 @@

   if fwd.vGen.isSome: # Otherwise this layer is pointless
     block:
+      # Merge `top` layer into `roFilter`
       let rc = db.merge fwd
       if rc.isErr:
         return err(rc.error)
-      rc.value
+    db.top = AristoLayerRef(vGen: db.roFilter.vGen.unsafeGet)

   if persistent:
-    # Save structural and other table entries
-    let txFrame = be.putBegFn()
-    be.putVtxFn(txFrame, db.roFilter.sTab.pairs.toSeq)
-    be.putKeyFn(txFrame, db.roFilter.kMap.pairs.toSeq)
-    be.putIdgFn(txFrame, db.roFilter.vGen.unsafeGet)
-    let w = be.putEndFn txFrame
-    if w != AristoError(0):
-      return err((VertexID(0),w))
-
-    db.roFilter = AristoFilterRef(nil)
+    let rc = db.resolveBE()
+    if rc.isErr:
+      return err(rc.error)
+    db.roFilter = AristoFilterRef(nil)

   # Delete or clear stack and clear top
   db.stack.setLen(0)
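With this change `stow()` is a two-stage affair. A hedged sketch of the two
modes::

    doAssert db.stow().isOk                    # stage only: top -> roFilter
    doAssert db.stow(persistent = true).isOk   # also resolve into the backend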
@@ -24,7 +24,8 @@ import
   ../nimbus/sync/snap/worker/db/[rocky_bulk_load, snapdb_accounts, snapdb_desc],
   ./replay/[pp, undump_accounts, undump_storages],
   ./test_sync_snap/[snap_test_xx, test_accounts, test_types],
-  ./test_aristo/[test_backend, test_helpers, test_transcode, test_tx]
+  ./test_aristo/[
+    test_backend, test_filter, test_helpers, test_transcode, test_tx]

 const
   baseDir = [".", "..", ".."/"..", $DirSep]
@@ -220,6 +221,9 @@ proc accountsRunner(
     test &"Delete accounts database, successively {accLst.len} entries":
       check noisy.testTxMergeAndDelete(accLst, dbDir)

+    test &"Distributed backend access {accLst.len} entries":
+      check noisy.testDistributedAccess(accLst, dbDir)
+

 proc storagesRunner(
     noisy = true;
@@ -253,6 +257,9 @@ proc storagesRunner(
     test &"Delete storage database, successively {stoLst.len} entries":
       check noisy.testTxMergeAndDelete(stoLst, dbDir)

+    test &"Distributed backend access {stoLst.len} entries":
+      check noisy.testDistributedAccess(stoLst, dbDir)
+
 # ------------------------------------------------------------------------------
 # Main function(s)
 # ------------------------------------------------------------------------------
@ -0,0 +1,423 @@
|
||||||
|
# Nimbus - Types, data structures and shared utilities used in network sync
|
||||||
|
#
|
||||||
|
# Copyright (c) 2018-2021 Status Research & Development GmbH
|
||||||
|
# Licensed under either of
|
||||||
|
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||||
|
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||||
|
# http://opensource.org/licenses/MIT)
|
||||||
|
# at your option. This file may not be copied, modified, or
|
||||||
|
# distributed except according to those terms.
|
||||||
|
|
||||||
|
## Aristo (aka Patricia) DB records distributed backend access test.
|
||||||
|
##
|
||||||
|
|
||||||
|
import
|
||||||
|
eth/common,
|
||||||
|
results,
|
||||||
|
unittest2,
|
||||||
|
../../nimbus/db/aristo/[
|
||||||
|
aristo_check, aristo_debug, aristo_desc, aristo_filter, aristo_get,
|
||||||
|
aristo_merge],
|
||||||
|
../../nimbus/db/[aristo, aristo/aristo_init/persistent],
|
||||||
|
./test_helpers
|
||||||
|
|
||||||
|
type
|
||||||
|
LeafTriplet = tuple
|
||||||
|
a, b, c: seq[LeafTiePayload]
|
||||||
|
|
||||||
|
LeafQuartet = tuple
|
||||||
|
a, b, c, d: seq[LeafTiePayload]
|
||||||
|
|
||||||
|
DbTriplet = object
|
||||||
|
db1, db2, db3: AristoDbRef
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# Private debugging helpers
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
proc dump(pfx: string; dx: varargs[AristoDbRef]): string =
|
||||||
|
proc dump(db: AristoDbRef): string =
|
||||||
|
db.pp & "\n " & db.to(TypedBackendRef).pp(db) & "\n"
|
||||||
|
if 0 < dx.len:
|
||||||
|
result = "\n "
|
||||||
|
var
|
||||||
|
pfx = pfx
|
||||||
|
qfx = ""
|
||||||
|
if pfx.len == 0:
|
||||||
|
(pfx,qfx) = ("[","]")
|
||||||
|
elif 1 < dx.len:
|
||||||
|
pfx = pfx & "#"
|
||||||
|
for n in 0 ..< dx.len:
|
||||||
|
let n1 = n + 1
|
||||||
|
result &= pfx
|
||||||
|
if 1 < dx.len:
|
||||||
|
result &= $n1
|
||||||
|
result &= qfx & "\n " & dx[n].dump
|
||||||
|
if n1 < dx.len:
|
||||||
|
result &= " ==========\n "
|
||||||
|
|
||||||
|
proc dump(dx: varargs[AristoDbRef]): string =
|
||||||
|
"".dump dx
|
||||||
|
|
||||||
|
proc dump(w: DbTriplet): string =
|
||||||
|
"db".dump(w.db1, w.db2, w.db3)
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# Private helpers
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
iterator quadripartite(td: openArray[ProofTrieData]): LeafQuartet =
  ## Split the argument list into quartets of leaf key/value lists.
  var collect: seq[seq[LeafTiePayload]]

  for w in td:
    let lst = w.kvpLst.mapRootVid VertexID(1)

    if lst.len < 8:
      if 2 < collect.len:
        yield(collect[0], collect[1], collect[2], lst)
        collect.setLen(0)
      else:
        collect.add lst
    else:
      if collect.len == 0:
        let a = lst.len div 4
        yield(lst[0 ..< a], lst[a ..< 2*a], lst[2*a ..< 3*a], lst[3*a .. ^1])
      else:
        if collect.len == 1:
          let a = lst.len div 3
          yield(collect[0], lst[0 ..< a], lst[a ..< 2*a], lst[2*a .. ^1])
        elif collect.len == 2:
          let a = lst.len div 2
          yield(collect[0], collect[1], lst[0 ..< a], lst[a .. ^1])
        else:
          yield(collect[0], collect[1], collect[2], lst)
        collect.setLen(0)
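
# A worked sketch of the splitting arithmetic above, assuming a single
# long input list with `lst.len = 20` and nothing collected yet:
#
#   a = 20 div 4 = 5
#   yield(lst[0 ..< 5], lst[5 ..< 10], lst[10 ..< 15], lst[15 .. ^1])
#
# Short lists (fewer than 8 entries) are instead accumulated in
# `collect` and only yielded as a quartet once three chunks have been
# collected and a fourth one arrives.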
proc dbTriplet(w: LeafQuartet; rdbPath: string): Result[DbTriplet,AristoError] =
  let
    db1 = block:
      let rc = newAristoDbRef(BackendRocksDB,rdbPath)
      if rc.isErr:
        check rc.error == 0
        return
      rc.value

    # Fill backend
    m0 = db1.merge w.a
    rc = db1.stow(persistent=true)

  if rc.isErr:
    check rc.error == (0,0)
    return

  let
    db2 = db1.copyCat.value
    db3 = db1.copyCat.value

    # Clause (9) from `aristo/README.md` example
    m1 = db1.merge w.b
    m2 = db2.merge w.c
    m3 = db3.merge w.d

  if m1.error == 0 and
     m2.error == 0 and
     m3.error == 0:
    return ok DbTriplet(db1: db1, db2: db2, db3: db3)

  # Failed
  db1.finish(flush=true)

  check m1.error == 0
  check m2.error == 0
  check m3.error == 0

  # Return the first error encountered
  var error = m1.error
  if error == 0: error = m2.error
  if error == 0: error = m3.error
  err(error)
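
# The triplet above sketches clause (9) of the `aristo/README.md`
# example: one RocksDB backend is pre-filled with `w.a` and then shared
# by three sessions, each carrying its own top-layer delta,
#
#   db1: top delta = w.b   (owns the backend)
#   db2: top delta = w.c   (attached via copyCat)
#   db3: top delta = w.d   (attached via copyCat)
#
# Hypothetical call site (the path is made up for illustration):
#
#   let rc = dbTriplet(w, "/tmp/aristo-rdb")
#   if rc.isOk:
#     let dx = rc.value
#     defer: dx.cleanUp()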
proc checkBeOk(
    dx: DbTriplet;
    relax = false;
    forceCache = false;
    noisy = true;
      ): bool =
  check not dx.db1.top.isNil
  block:
    let
      cache = if forceCache: true else: not dx.db1.top.dirty
      rc1 = dx.db1.checkBE(relax=relax, cache=cache)
    if rc1.isErr:
      noisy.say "***", "db1 check failed (do-cache=", cache, ")"
      check rc1.error == (0,0)
      return
  block:
    let
      cache = if forceCache: true else: not dx.db2.top.dirty
      rc2 = dx.db2.checkBE(relax=relax, cache=cache)
    if rc2.isErr:
      noisy.say "***", "db2 check failed (do-cache=", cache, ")"
      check rc2.error == (0,0)
      return
  block:
    let
      cache = if forceCache: true else: not dx.db3.top.dirty
      rc3 = dx.db3.checkBE(relax=relax, cache=cache)
    if rc3.isErr:
      noisy.say "***", "db3 check failed (do-cache=", cache, ")"
      check rc3.error == (0,0)
      return
  true
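
# Note on the `cache` argument above: `checkBE()` flags a dirty top
# layer as an error when cache verification is requested, so the checks
# derive
#
#   cache = if forceCache: true else: not db.top.dirty
#
# i.e. a top layer with unsaved changes skips the cache comparison
# unless the caller forces it via `forceCache`.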
# ---------

proc cleanUp(dx: DbTriplet) =
  discard dx.db3.dispose
  discard dx.db2.dispose
  dx.db1.finish(flush=true)
proc eq(a, b: AristoFilterRef; db: AristoDbRef; noisy = true): bool =
  ## Verify that argument filter `a` has the same effect on the
  ## physical/unfiltered backend of `db` as argument filter `b`.
  if a.isNil:
    return b.isNil
  if b.isNil:
    return false
  if unsafeAddr(a[]) != unsafeAddr(b[]):
    if a.src != b.src or
       a.trg != b.trg or
       a.vGen != b.vGen:
      return false

    # Void entries may differ unless on physical backend
    var (aTab, bTab) = (a.sTab, b.sTab)
    if aTab.len < bTab.len:
      aTab.swap bTab
    for (vid,aVtx) in aTab.pairs:
      let bVtx = bTab.getOrVoid vid
      bTab.del vid

      if aVtx != bVtx:
        if aVtx.isValid and bVtx.isValid:
          return false
        # The valid one must match the backend data
        let rc = db.getVtxUBE vid
        if rc.isErr:
          return false
        let vtx = if aVtx.isValid: aVtx else: bVtx
        if vtx != rc.value:
          return false

      elif not vid.isValid and not bTab.hasKey vid:
        let rc = db.getVtxUBE vid
        if rc.isOk:
          return false # Exists on backend but missing on `bTab[]`
        elif rc.error != GetVtxNotFound:
          return false # general error

    if 0 < bTab.len:
      noisy.say "*** eq:", "bTabLen=", bTab.len
      return false

    # Similar for `kMap[]`
    var (aMap, bMap) = (a.kMap, b.kMap)
    if aMap.len < bMap.len:
      aMap.swap bMap
    for (vid,aKey) in aMap.pairs:
      let bKey = bMap.getOrVoid vid
      bMap.del vid

      if aKey != bKey:
        if aKey.isValid and bKey.isValid:
          return false
        # The valid one must match the backend data
        let rc = db.getKeyUBE vid
        if rc.isErr:
          return false
        let key = if aKey.isValid: aKey else: bKey
        if key != rc.value:
          return false

      elif not vid.isValid and not bMap.hasKey vid:
        let rc = db.getKeyUBE vid
        if rc.isOk:
          return false # Exists on backend but missing on `bMap[]`
        elif rc.error != GetKeyNotFound:
          return false # general error

    if 0 < bMap.len:
      noisy.say "*** eq:", " bMapLen=", bMap.len
      return false

  true
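
# The `eq()` relation above is weaker than structural equality: filters
# may differ in void entries as long as the unfiltered backend agrees.
# A hedged usage sketch, mirroring the clause (14)/(15) checks further
# down (`savedFilter` is a previously captured `roFilter`, made up for
# illustration):
#
#   let same = savedFilter.eq(db.roFilter, db, noisy)
#   check same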
# ------------------------------------------------------------------------------
# Public test function
# ------------------------------------------------------------------------------
proc testDistributedAccess*(
    noisy: bool;
    list: openArray[ProofTrieData];
    rdbPath: string;                          # Rocks DB storage directory
      ): bool =
  var n = 0
  for w in list.quadripartite:
    n.inc

    # Resulting clause (11) filters from `aristo/README.md` example
    # which will be used in the second part of the tests
    var
      c11Filter1 = AristoFilterRef(nil)
      c11Filter3 = AristoFilterRef(nil)

    # Work through clauses (8)..(11) from `aristo/README.md` example
    block:

      # Clause (8) from `aristo/README.md` example
      let
        dx = block:
          let rc = dbTriplet(w, rdbPath)
          if rc.isErr:
            return
          rc.value
        (db1, db2, db3) = (dx.db1, dx.db2, dx.db3)
      defer:
        dx.cleanUp()

      when false: # or true:
        noisy.say "*** testDistributedAccess (1)", "n=", n, dx.dump

      # Clause (9) from `aristo/README.md` example
      block:
        let rc = db1.stow(persistent=true)
        if rc.isErr:
          # noisy.say "*** testDistributedAccess (2) n=", n, dx.dump
          check rc.error == (0,0)
          return
      if db1.roFilter != AristoFilterRef(nil):
        check db1.roFilter == AristoFilterRef(nil)
        return
      if db2.roFilter != db3.roFilter:
        check db2.roFilter == db3.roFilter
        return

      block:
        let rc = db2.stow(persistent=false)
        if rc.isErr:
          noisy.say "*** testDistributedAccess (3)", "n=", n, "db2".dump db2
          check rc.error == (0,0)
          return
      if db1.roFilter != AristoFilterRef(nil):
        check db1.roFilter == AristoFilterRef(nil)
        return
      if db2.roFilter == db3.roFilter:
        check db2.roFilter != db3.roFilter
        return

      # Clause (11) from `aristo/README.md` example
      block:
        let rc = db2.ackqRwMode()
        if rc.isErr:
          check rc.error == 0
          return
      block:
        let rc = db2.stow(persistent=true)
        if rc.isErr:
          check rc.error == (0,0)
          return
      if db2.roFilter != AristoFilterRef(nil):
        check db2.roFilter == AristoFilterRef(nil)
        return

      # Check/verify backends
      block:
        let ok = dx.checkBeOk(noisy=noisy)
        if not ok:
          noisy.say "*** testDistributedAccess (4)", "n=", n, "db3".dump db3
          check ok
          return

      # Capture filters from clause (11)
      c11Filter1 = db1.roFilter
      c11Filter3 = db3.roFilter

      # Clean up
      dx.cleanUp()

    # ----------

    # Work through clauses (12)..(15) from `aristo/README.md` example
    block:
      let
        dy = block:
          let rc = dbTriplet(w, rdbPath)
          if rc.isErr:
            return
          rc.value
        (db1, db2, db3) = (dy.db1, dy.db2, dy.db3)
      defer:
        dy.cleanUp()

      # Build clause (12) from `aristo/README.md` example
      block:
        let rc = db2.ackqRwMode()
        if rc.isErr:
          check rc.error == 0
          return
      block:
        let rc = db2.stow(persistent=true)
        if rc.isErr:
          check rc.error == (0,0)
          return
      if db2.roFilter != AristoFilterRef(nil):
        check db2.roFilter == AristoFilterRef(nil)
        return
      if db1.roFilter != db3.roFilter:
        check db1.roFilter == db3.roFilter
        return

      # Clause (13) from `aristo/README.md` example
      block:
        let rc = db1.stow(persistent=false)
        if rc.isErr:
          check rc.error == (0,0)
          return

      # Clause (14) from `aristo/README.md` check
      block:
        let c11Filter1_eq_db1RoFilter = c11Filter1.eq(db1.roFilter, db1, noisy)
        if not c11Filter1_eq_db1RoFilter:
          noisy.say "*** testDistributedAccess (7)", "n=", n,
            "\n c11Filter1=", c11Filter1.pp(db1),
            "db1".dump(db1)
          check c11Filter1_eq_db1RoFilter
          return

      # Clause (15) from `aristo/README.md` check
      block:
        let c11Filter3_eq_db3RoFilter = c11Filter3.eq(db3.roFilter, db3, noisy)
        if not c11Filter3_eq_db3RoFilter:
          noisy.say "*** testDistributedAccess (8)", "n=", n,
            "\n c11Filter3=", c11Filter3.pp(db3),
            "db3".dump(db3)
          check c11Filter3_eq_db3RoFilter
          return

      # Check/verify backends
      block:
        let ok = dy.checkBeOk(noisy=noisy)
        if not ok:
          check ok
          return

      when false: # or true:
        noisy.say "*** testDistributedAccess (9)", "n=", n, dy.dump

  true
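
# Condensed recap of the two scenarios exercised per quartet `w` above
# (a sketch; clause numbers refer to the `aristo/README.md` example):
#
#   first block:   (8) build triplet, (9) db1.stow(persistent=true),
#                  then db2.stow(persistent=false), (11) db2.ackqRwMode()
#                  plus db2.stow(persistent=true); finally capture the
#                  db1/db3 `roFilter`s
#   second block:  (12) db2 saves first via ackqRwMode/stow, (13)
#                  db1.stow(persistent=false), then (14)/(15) require
#                  the captured filters to be `eq` to the recomputed ones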
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------
@ -9,12 +9,12 @@
# at your option. This file may not be copied, modified, or
# distributed except according to those terms.

## Aristo (aka Patricia) DB records merge test
## Aristo (aka Patricia) DB records transaction based merge test

import
  std/[algorithm, bitops, sequtils, sets, tables],
  eth/common,
  stew/results,
  results,
  unittest2,
  ../../nimbus/db/aristo/[
    aristo_check, aristo_delete, aristo_desc, aristo_get, aristo_merge],