Added portal proof nodes generation functionality (#2539)
* Extracted `test_tx.testTxMergeProofAndKvpList()` => separate file

* Fix serialiser
  why: A typo led to duplicate rlp-encoded nodes in the chain

* Remove cruft

* Implement portal proof nodes generators `partXxxTwig()`

* Add unit test for portal proof nodes generator `partAccountTwig()`

* Cosmetics

* Simplify serialiser return code format

* Fix proof generator for extension nodes
  why: The code was simply bonkers; this went undetected until the unit tests
       were adapted to check for exactly this.

* Implemented portal proof nodes verifier `partUntwig()`

* Cosmetics

* Fix `testutp` cli problem
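In rough outline, the proof generator and verifier added here can be used as follows. This is a minimal, hypothetical sketch: the initialised database handle `db`, the hashed account path `accPath` and the state root `stateRoot` are assumed to be supplied by the caller, and the import paths are abbreviated relative to the `nimbus` source tree.

```nim
import
  eth/common,
  results,
  ./db/aristo/[aristo_desc, aristo_part]

proc demoAccountProof(db: AristoDbRef; accPath, stateRoot: Hash256) =
  # Generate the chain of rlp-encoded proof nodes for the account path.
  let proof = db.partAccountTwig(accPath).valueOr:
    return
  # Verify the chain against the state root and recover the leaf payload.
  let payload = proof.partUntwig(stateRoot, accPath).valueOr:
    return
  # `partUntwigOk()` folds chain verification and payload comparison into a
  # single yes/no answer.
  doAssert proof.partUntwigOk(stateRoot, accPath, payload).isOk
```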
parent ec118a438a
commit 5b502a06c4
@@ -80,6 +80,8 @@ jobs:
- name: run test app with simulator
run: |
: find / -name docker-compose -printf "%h\n%f\n%m\n\n" 2>/dev/null
PATH=$PATH$(find /usr/libexec/docker -name docker-compose -printf ":%h")
SCENARIO="drop-rate --delay=15ms --bandwidth=10Mbps --queue=25 --rate_to_client=10 --rate_to_server=10" docker-compose -f fluffy/tools/utp_testing/docker/docker-compose.yml up -d

- name: wait 5 seconds for containers to start

@@ -98,7 +100,9 @@ jobs:
- name: Stop containers
if: always()
run: docker-compose -f fluffy/tools/utp_testing/docker/docker-compose.yml down
run: |
PATH=$PATH$(find /usr/libexec/docker -name docker-compose -printf ":%h")
docker-compose -f fluffy/tools/utp_testing/docker/docker-compose.yml down

build:
strategy:
@@ -152,10 +152,10 @@ type
|
|||
|
||||
|
||||
# Part/proof node errors
|
||||
PartArgNotInCore
|
||||
PartArgNotGenericRoot
|
||||
PartArgRootAlreadyUsed
|
||||
PartArgNotInCore
|
||||
PartArgRootAlreadyOnDatabase
|
||||
PartArgRootAlreadyUsed
|
||||
PartChkChangedKeyNotInKeyTab
|
||||
PartChkChangedVtxMissing
|
||||
PartChkCoreKeyLookupFailed
|
||||
|
@@ -169,6 +169,10 @@ type
|
|||
PartChkVidKeyTabLengthsDiffer
|
||||
PartChkVidTabCoreRootMissing
|
||||
PartChkVidTabVidMissing
|
||||
PartChnBranchPathExhausted
|
||||
PartChnExtPfxMismatch
|
||||
PartChnLeafPathMismatch
|
||||
PartChnNodeConvError
|
||||
PartCtxNotAvailable
|
||||
PartCtxStaleDescriptor
|
||||
PartExtVtxExistsAlready
|
||||
|
@@ -190,6 +194,13 @@ type
|
|||
PartRlpPayloadException
|
||||
PartRootKeysDontMatch
|
||||
PartRootVidsDontMatch
|
||||
PartTrkEmptyPath
|
||||
PartTrkFollowUpKeyMismatch
|
||||
PartTrkGarbledNode
|
||||
PartTrkLeafPfxMismatch
|
||||
PartTrkLinkExpected
|
||||
PartTrkPayloadMismatch
|
||||
PartTrkRlpError
|
||||
PartVtxSlotWasModified
|
||||
PartVtxSlotWasNotModified
|
||||
|
||||
|
|
|
@@ -47,31 +47,12 @@ proc kind*(
|
|||
# Public database constructors, destructor
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc init*(
|
||||
T: type AristoDbRef; # Target type
|
||||
B: type MemBackendRef; # Backend type
|
||||
): T =
|
||||
## Memory backend constructor.
|
||||
##
|
||||
## If the `qidLayout` argument is set `QidLayoutRef(nil)`, the a backend
|
||||
## database will not provide filter history management. Providing a different
|
||||
## scheduler layout shoud be used with care as table access with different
|
||||
## layouts might render the filter history data unmanageable.
|
||||
##
|
||||
when B is MemBackendRef:
|
||||
AristoDbRef(top: LayerRef.init(), backend: memoryBackend())
|
||||
|
||||
proc init*(
|
||||
T: type AristoDbRef; # Target type
|
||||
B: type MemOnlyBackend; # Backend type
|
||||
): T =
|
||||
## Memory backend constructor.
|
||||
##
|
||||
## If the `qidLayout` argument is set `QidLayoutRef(nil)`, the a backend
|
||||
## database will not provide filter history management. Providing a different
|
||||
## scheduler layout shoud be used with care as table access with different
|
||||
## layouts might render the filter history data unmanageable.
|
||||
##
|
||||
when B is VoidBackendRef:
|
||||
AristoDbRef(top: LayerRef.init())
|
||||
|
||||
|
|
|
@@ -17,9 +17,10 @@ import
|
|||
std/[sets, sequtils],
|
||||
eth/common,
|
||||
results,
|
||||
"."/[aristo_desc, aristo_get, aristo_merge, aristo_layers, aristo_utils],
|
||||
"."/[aristo_desc, aristo_fetch, aristo_get, aristo_merge, aristo_layers,
|
||||
aristo_utils],
|
||||
#./aristo_part/part_debug,
|
||||
./aristo_part/[part_ctx, part_desc, part_helpers]
|
||||
./aristo_part/[part_chain_rlp, part_ctx, part_desc, part_helpers]
|
||||
|
||||
export
|
||||
PartStateCtx,
|
||||
|
@@ -35,22 +36,118 @@ proc roots*(ps: PartStateRef): seq[VertexID] =
|
|||
## Getter: list of root vertex IDs from `ps`.
|
||||
ps.core.keys.toSeq
|
||||
|
||||
iterator perimeter*(ps: PartStateRef; root: VertexID): VertexID =
|
||||
iterator perimeter*(
|
||||
ps: PartStateRef;
|
||||
root: VertexID;
|
||||
): (RootedVertexID, HashKey) =
|
||||
## Retrieve the list of dangling vertex IDs relative to `ps`.
|
||||
ps.core.withValue(root,keys):
|
||||
for (key,rvid) in ps.byKey.pairs:
|
||||
if rvid.root == root and key notin keys[] and key notin ps.changed:
|
||||
yield rvid.vid
|
||||
yield (rvid,key)
|
||||
|
||||
iterator vkPairs*(ps: PartStateRef): (RootedVertexID,HashKey) =
|
||||
iterator updated*(
|
||||
ps: PartStateRef;
|
||||
root: VertexID;
|
||||
): (RootedVertexID, HashKey) =
|
||||
## Retrieve the list of changed vertex IDs relative to `ps`. These vertex
## IDs are no longer considered part of the perimeter.
|
||||
for key in ps.changed:
|
||||
let rvid = ps[key]
|
||||
if rvid.root == root:
|
||||
yield (rvid,key)
|
||||
|
||||
iterator vkPairs*(ps: PartStateRef): (RootedVertexID, HashKey) =
|
||||
## Retrieve the list of cached `(key,vertex-ID)` pairs.
|
||||
for (k,v) in ps.byKey.pairs:
|
||||
yield (v,k)
|
||||
for (key, rvid) in ps.byKey.pairs:
|
||||
yield (rvid, key)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc partGenericTwig*(
|
||||
db: AristoDbRef;
|
||||
root: VertexID;
|
||||
path: NibblesBuf;
|
||||
): Result[seq[Blob], AristoError] =
|
||||
## This function returns a chain of rlp-encoded nodes along the argument
|
||||
## path `(root,path)`.
|
||||
##
|
||||
var chain: seq[Blob]
|
||||
? db.chainRlpNodes((root,root), path, chain)
|
||||
ok chain
|
||||
|
||||
proc partGenericTwig*(
|
||||
db: AristoDbRef;
|
||||
root: VertexID;
|
||||
path: openArray[byte];
|
||||
): Result[seq[Blob], AristoError] =
|
||||
## Variant of `partGenericTwig()`.
|
||||
##
|
||||
## Note: This function provides functionality comparable to the
|
||||
## `getBranch()` function from `hexary.nim`
|
||||
##
|
||||
db.partGenericTwig(root, NibblesBuf.fromBytes path)
|
||||
|
||||
proc partAccountTwig*(
|
||||
db: AristoDbRef;
|
||||
accPath: Hash256;
|
||||
): Result[seq[Blob], AristoError] =
|
||||
## Variant of `partGenericTwig()`.
|
||||
db.partGenericTwig(VertexID(1), NibblesBuf.fromBytes accPath.data)
|
||||
|
||||
proc partStorageTwig*(
|
||||
db: AristoDbRef;
|
||||
accPath: Hash256;
|
||||
stoPath: Hash256;
|
||||
): Result[seq[Blob], AristoError] =
|
||||
## Variant of `partGenericTwig()`.
|
||||
let vid = ? db.fetchStorageID accPath
|
||||
db.partGenericTwig(vid, NibblesBuf.fromBytes stoPath.data)
|
||||
|
||||
# ----------
|
||||
|
||||
proc partUntwig*(
|
||||
chain: openArray[Blob];
|
||||
root: Hash256;
|
||||
path: openArray[byte];
|
||||
): Result[Blob,AristoError] =
|
||||
try:
|
||||
let nibbles = NibblesBuf.fromBytes path
|
||||
return chain.trackRlpNodes(root.to(HashKey), nibbles, start=true)
|
||||
except RlpError as e:
|
||||
return err(PartTrkRlpError)
|
||||
|
||||
proc partUntwig*(
|
||||
chain: openArray[Blob];
|
||||
root: Hash256;
|
||||
path: Hash256;
|
||||
): Result[Blob,AristoError] =
|
||||
chain.partUntwig(root, path.data)
|
||||
|
||||
|
||||
proc partUntwigOk*(
|
||||
chain: openArray[Blob];
|
||||
root: Hash256;
|
||||
path: openArray[byte];
|
||||
payload: openArray[byte];
|
||||
): Result[void,AristoError] =
|
||||
if payload == ? chain.partUntwig(root, path):
|
||||
ok()
|
||||
else:
|
||||
err(PartTrkPayloadMismatch)
|
||||
|
||||
proc partUntwigOk*(
|
||||
chain: openArray[Blob];
|
||||
root: Hash256;
|
||||
path: Hash256;
|
||||
payload: openArray[byte];
|
||||
): Result[void,AristoError] =
|
||||
chain.partUntwigOk(root, path.data, payload)
|
||||
|
||||
# ----------------
|
||||
|
||||
proc partPut*(
|
||||
ps: PartStateRef; # Partial database descriptor
|
||||
proof: openArray[Blob]; # RLP encoded proof nodes
|
||||
|
@@ -67,6 +164,11 @@ proc partPut*(
|
|||
# Check whether the chain has an accounts leaf node
|
||||
? ps.updateAccountsTree(nodes, bl, mode)
|
||||
|
||||
when false: # or true:
|
||||
echo ">>> partPut",
|
||||
"\n chains\n ", bl.chains.pp(ps),
|
||||
""
|
||||
|
||||
# Assign vertex IDs. If possible, use IDs from `state` lookup
|
||||
var seen: HashSet[HashKey]
|
||||
for chain in bl.chains:
|
||||
|
|
|
@@ -0,0 +1,107 @@
|
|||
# nimbus-eth1
|
||||
# Copyright (c) 2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
# http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or distributed
|
||||
# except according to those terms.
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
eth/common,
|
||||
results,
|
||||
".."/[aristo_desc, aristo_get, aristo_utils, aristo_compute, aristo_serialise]
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc chainRlpNodes*(
|
||||
db: AristoDbRef;
|
||||
rvid: RootedVertexID;
|
||||
path: NibblesBuf,
|
||||
chain: var seq[Blob];
|
||||
): Result[void,AristoError] =
|
||||
## Inspired by the `getBranchAux()` function from `hexary.nim`
|
||||
let
|
||||
key = ? db.computeKey rvid
|
||||
(vtx,_) = ? db.getVtxRc rvid
|
||||
node = vtx.toNode(rvid.root, db).valueOr:
|
||||
return err(PartChnNodeConvError)
|
||||
|
||||
# Save rlp encoded node(s)
|
||||
chain &= node.to(seq[Blob])
|
||||
|
||||
# Follow up child node
|
||||
case vtx.vType:
|
||||
of Leaf:
|
||||
if path != vtx.lPfx:
|
||||
err(PartChnLeafPathMismatch)
|
||||
else:
|
||||
ok()
|
||||
|
||||
of Branch:
|
||||
let nChewOff = sharedPrefixLen(vtx.ePfx, path)
|
||||
if nChewOff != vtx.ePfx.len:
|
||||
err(PartChnExtPfxMismatch)
|
||||
elif path.len == nChewOff:
|
||||
err(PartChnBranchPathExhausted)
|
||||
else:
|
||||
let
|
||||
nibble = path[nChewOff]
|
||||
rest = path.slice(nChewOff+1)
|
||||
# Recursion!
|
||||
db.chainRlpNodes((rvid.root,vtx.bVid[nibble]), rest, chain)
|
||||
|
||||
|
||||
proc trackRlpNodes*(
|
||||
chain: openArray[Blob];
|
||||
topKey: HashKey;
|
||||
path: NibblesBuf;
|
||||
start = false;
|
||||
): Result[Blob,AristoError]
|
||||
{.gcsafe, raises: [RlpError]} =
|
||||
## Verify rlp-encoded node chain created by `chainRlpNodes()`.
|
||||
if path.len == 0:
|
||||
return err(PartTrkEmptyPath)
|
||||
|
||||
# Verify key against rlp-node
|
||||
let digest = chain[0].digestTo(HashKey)
|
||||
if start:
|
||||
if topKey.to(Hash256) != digest.to(Hash256):
|
||||
return err(PartTrkFollowUpKeyMismatch)
|
||||
else:
|
||||
if topKey != digest:
|
||||
return err(PartTrkFollowUpKeyMismatch)
|
||||
|
||||
var
|
||||
node = rlpFromBytes chain[0]
|
||||
nChewOff = 0
|
||||
link: Blob
|
||||
|
||||
# Decode rlp-node and prepare for recursion
|
||||
case node.listLen
|
||||
of 2:
|
||||
let (isLeaf, segm) = NibblesBuf.fromHexPrefix node.listElem(0).toBytes
|
||||
nChewOff = sharedPrefixLen(path, segm)
|
||||
link = node.listElem(1).toBytes # link or payload
|
||||
if isLeaf:
|
||||
if nChewOff == path.len:
|
||||
return ok(link)
|
||||
return err(PartTrkLeafPfxMismatch)
|
||||
of 17:
|
||||
nChewOff = 1
|
||||
link = node.listElem(path[0].int).toBytes
|
||||
else:
|
||||
return err(PartTrkGarbledNode)
|
||||
|
||||
let nextKey = HashKey.fromBytes(link).valueOr:
|
||||
return err(PartTrkLinkExpected)
|
||||
chain.toOpenArray(1,chain.len-1).trackRlpNodes(nextKey, path.slice nChewOff)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
# ------------------------------------------------------------------------------
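Taken together, `chainRlpNodes()` and `trackRlpNodes()` above form a generate/verify pair for proof chains. A minimal round-trip sketch, assuming the module's own imports plus an initialised `db`, a nibble path `path` of an existing leaf, and the Merkle key `rootKey` of the root vertex:

```nim
# Hypothetical fragment; `db`, `path` and `rootKey` come from the caller.
var chain: seq[Blob]
if db.chainRlpNodes((VertexID(1), VertexID(1)), path, chain).isOk:
  # With `start = true` the first node is compared as a plain hash rather
  # than as a (possibly embedded) `HashKey`.
  try:
    let payload = chain.trackRlpNodes(rootKey, path, start = true)
    doAssert payload.isOk
  except RlpError:
    discard # a garbled node was detected while following the chain
```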
|
|
@@ -112,6 +112,7 @@ proc `[]`*(ps: PartStateRef; key: HashKey): RootedVertexID =
|
|||
proc `[]`*(ps: PartStateRef; vid: VertexID): HashKey =
|
||||
ps.byVid.withValue(vid,key):
|
||||
return key[]
|
||||
VOID_HASH_KEY
|
||||
|
||||
|
||||
proc del*(ps: PartStateRef; key: HashKey) =
|
||||
|
|
|
@@ -22,74 +22,74 @@ import
|
|||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc read(rlp: var Rlp; T: type PrfNode): T {.gcsafe, raises: [RlpError].} =
|
||||
## Mixin for RLP reader. The decoder wraps an error return code in a `Leaf`
## node if needed.
|
||||
##
|
||||
func readError(error: AristoError): PrfNode =
|
||||
## Prettify return code expression
|
||||
PrfNode(vType: Leaf, prfType: isError, error: error)
|
||||
## Mixin for RLP reader. The decoder wraps an error return code in a `Leaf`
## node if needed.
|
||||
##
|
||||
func readError(error: AristoError): PrfNode =
|
||||
## Prettify return code expression
|
||||
PrfNode(vType: Leaf, prfType: isError, error: error)
|
||||
|
||||
if not rlp.isList:
|
||||
# Otherwise `rlp.items` would raise a `Defect`
|
||||
return readError(PartRlp2Or17ListEntries)
|
||||
if not rlp.isList:
|
||||
# Otherwise `rlp.items` would raise a `Defect`
|
||||
return readError(PartRlp2Or17ListEntries)
|
||||
|
||||
var
|
||||
blobs = newSeq[Blob](2) # temporary, cache
|
||||
links: array[16,HashKey] # reconstruct branch node
|
||||
top = 0 # count entries and positions
|
||||
var
|
||||
blobs = newSeq[Blob](2) # temporary, cache
|
||||
links: array[16,HashKey] # reconstruct branch node
|
||||
top = 0 # count entries and positions
|
||||
|
||||
# Collect lists of either 2 or 17 blob entries.
|
||||
for w in rlp.items:
|
||||
case top
|
||||
of 0, 1:
|
||||
if not w.isBlob:
|
||||
return readError(PartRlpBlobExpected)
|
||||
blobs[top] = rlp.read(Blob)
|
||||
of 2 .. 15:
|
||||
let blob = rlp.read(Blob)
|
||||
links[top] = HashKey.fromBytes(blob).valueOr:
|
||||
return readError(PartRlpBranchHashKeyExpected)
|
||||
of 16:
|
||||
if not w.isBlob or 0 < rlp.read(Blob).len:
|
||||
return readError(PartRlpEmptyBlobExpected)
|
||||
else:
|
||||
return readError(PartRlp2Or17ListEntries)
|
||||
top.inc
|
||||
|
||||
# Verify extension data
|
||||
# Collect lists of either 2 or 17 blob entries.
|
||||
for w in rlp.items:
|
||||
case top
|
||||
of 2:
|
||||
if blobs[0].len == 0:
|
||||
return readError(PartRlpNonEmptyBlobExpected)
|
||||
let (isLeaf, pathSegment) = NibblesBuf.fromHexPrefix blobs[0]
|
||||
if isLeaf:
|
||||
return PrfNode(
|
||||
vType: Leaf,
|
||||
prfType: ignore,
|
||||
lPfx: pathSegment,
|
||||
lData: LeafPayload(
|
||||
pType: RawData,
|
||||
rawBlob: blobs[1]))
|
||||
else:
|
||||
var node = PrfNode(
|
||||
vType: Branch,
|
||||
prfType: isExtension,
|
||||
ePfx: pathSegment)
|
||||
node.key[0] = HashKey.fromBytes(blobs[1]).valueOr:
|
||||
return readError(PartRlpExtHashKeyExpected)
|
||||
return node
|
||||
of 17:
|
||||
for n in [0,1]:
|
||||
links[n] = HashKey.fromBytes(blobs[n]).valueOr:
|
||||
return readError(PartRlpBranchHashKeyExpected)
|
||||
return PrfNode(
|
||||
vType: Branch,
|
||||
prfType: ignore,
|
||||
key: links)
|
||||
of 0, 1:
|
||||
if not w.isBlob:
|
||||
return readError(PartRlpBlobExpected)
|
||||
blobs[top] = rlp.read(Blob)
|
||||
of 2 .. 15:
|
||||
let blob = rlp.read(Blob)
|
||||
links[top] = HashKey.fromBytes(blob).valueOr:
|
||||
return readError(PartRlpBranchHashKeyExpected)
|
||||
of 16:
|
||||
if not w.isBlob or 0 < rlp.read(Blob).len:
|
||||
return readError(PartRlpEmptyBlobExpected)
|
||||
else:
|
||||
discard
|
||||
return readError(PartRlp2Or17ListEntries)
|
||||
top.inc
|
||||
|
||||
readError(PartRlp2Or17ListEntries)
|
||||
# Verify extension data
|
||||
case top
|
||||
of 2:
|
||||
if blobs[0].len == 0:
|
||||
return readError(PartRlpNonEmptyBlobExpected)
|
||||
let (isLeaf, pathSegment) = NibblesBuf.fromHexPrefix blobs[0]
|
||||
if isLeaf:
|
||||
return PrfNode(
|
||||
vType: Leaf,
|
||||
prfType: ignore,
|
||||
lPfx: pathSegment,
|
||||
lData: LeafPayload(
|
||||
pType: RawData,
|
||||
rawBlob: blobs[1]))
|
||||
else:
|
||||
var node = PrfNode(
|
||||
vType: Branch,
|
||||
prfType: isExtension,
|
||||
ePfx: pathSegment)
|
||||
node.key[0] = HashKey.fromBytes(blobs[1]).valueOr:
|
||||
return readError(PartRlpExtHashKeyExpected)
|
||||
return node
|
||||
of 17:
|
||||
for n in [0,1]:
|
||||
links[n] = HashKey.fromBytes(blobs[n]).valueOr:
|
||||
return readError(PartRlpBranchHashKeyExpected)
|
||||
return PrfNode(
|
||||
vType: Branch,
|
||||
prfType: ignore,
|
||||
key: links)
|
||||
else:
|
||||
discard
|
||||
|
||||
readError(PartRlp2Or17ListEntries)
|
||||
|
||||
proc read(rlp: var Rlp; T: type PrfPayload): T {.gcsafe, raises: [RlpError].} =
|
||||
## Mixin for RLP reader decoding `Account` or storage slot payload.
|
||||
|
|
|
@@ -67,44 +67,49 @@ func append*(w: var RlpWriter; key: HashKey) =
|
|||
|
||||
# ---------------------
|
||||
|
||||
proc to*(w: tuple[key: HashKey, node: NodeRef]; T: type seq[(Blob,Blob)]): T =
|
||||
## Convert the argument pait `w` to a single or a double pair of
|
||||
## `(<key>,<rlp-encoded-node>)` tuples. Only in case of a combined extension
|
||||
## and branch vertex argument, there are is a double pair result.
|
||||
var wr = initRlpWriter()
|
||||
case w.node.vType:
|
||||
proc to*(node: NodeRef; T: type seq[Blob]): T =
|
||||
## Convert the argument `node` to a single or double item list of
## `<rlp-encoded-node>` type entries. Only in case of a combined extension
## and branch vertex argument will there be a double item result.
|
||||
##
|
||||
case node.vType:
|
||||
of Branch:
|
||||
# Do branch node
|
||||
var wr = initRlpWriter()
|
||||
wr.startList(17)
|
||||
for n in 0..15:
|
||||
wr.append w.node.key[n]
|
||||
wr.append node.key[n]
|
||||
wr.append EmptyBlob
|
||||
let brData = wr.finish()
|
||||
|
||||
if 0 < w.node.ePfx.len:
|
||||
# Do for embedded extension node
|
||||
let brHash = wr.finish().digestTo(HashKey)
|
||||
result.add (@(brHash.data), wr.finish())
|
||||
if 0 < node.ePfx.len:
|
||||
# Prefix branch by embedded extension node
|
||||
let brHash = brData.digestTo(HashKey)
|
||||
|
||||
wr = initRlpWriter()
|
||||
wr.startList(2)
|
||||
wr.append w.node.ePfx.toHexPrefix(isleaf = false)
|
||||
wr.append brHash
|
||||
var wrx = initRlpWriter()
|
||||
wrx.startList(2)
|
||||
wrx.append node.ePfx.toHexPrefix(isleaf = false)
|
||||
wrx.append brHash
|
||||
|
||||
result.add wrx.finish()
|
||||
result.add brData
|
||||
else:
|
||||
# Do for pure branch node
|
||||
result.add (@(w.key.data), wr.finish())
|
||||
result.add brData
|
||||
|
||||
of Leaf:
|
||||
proc getKey0(
|
||||
vid: VertexID;
|
||||
): Result[HashKey,AristoError]
|
||||
{.gcsafe, raises: [].} =
|
||||
ok(w.node.key[0]) # always succeeds
|
||||
ok(node.key[0]) # always succeeds
|
||||
|
||||
var wr = initRlpWriter()
|
||||
wr.startList(2)
|
||||
wr.append w.node.lPfx.toHexPrefix(isleaf = true)
|
||||
wr.append w.node.lData.serialise(getKey0).value
|
||||
wr.append node.lPfx.toHexPrefix(isleaf = true)
|
||||
wr.append node.lData.serialise(getKey0).value
|
||||
|
||||
result.add (@(w.key.data), wr.finish())
|
||||
result.add (wr.finish())
|
||||
|
||||
proc digestTo*(node: NodeRef; T: type HashKey): T =
|
||||
## Convert the argument `node` to the corresponding Merkle hash key. Note
|
||||
|
@@ -122,7 +127,7 @@ proc digestTo*(node: NodeRef; T: type HashKey): T =
|
|||
# Do for embedded extension node
|
||||
if 0 < node.ePfx.len:
|
||||
let brHash = wr.finish().digestTo(HashKey)
|
||||
wr= initRlpWriter()
|
||||
wr = initRlpWriter()
|
||||
wr.startList(2)
|
||||
wr.append node.ePfx.toHexPrefix(isleaf = false)
|
||||
wr.append brHash
|
||||
|
|
|
@@ -65,7 +65,10 @@ iterator aristoReplicate[T](
|
|||
let p = mpt.call(forkTx, mpt.mpt, 0).valueOrApiError "aristoReplicate()"
|
||||
defer: discard mpt.call(forget, p)
|
||||
for (rVid,key,vtx,node) in T.replicate(p):
|
||||
for (k,v) in (key,node).to(seq[(Blob,Blob)]):
|
||||
yield (k, v)
|
||||
let w = node.to(seq[Blob])
|
||||
yield (@(key.data),w[0])
|
||||
if 1 < w.len:
|
||||
# Was an extension merged into a branch
|
||||
yield (@(w[1].digestTo(HashKey).data),w[1])
|
||||
|
||||
# End
|
||||
|
|
|
@@ -18,8 +18,10 @@ import
|
|||
unittest2,
|
||||
../nimbus/db/aristo/aristo_desc,
|
||||
./replay/[pp, undump_accounts, undump_storages],
|
||||
./test_aristo/test_short_keys,
|
||||
./test_aristo/test_blobify,
|
||||
./test_aristo/test_merge_proof,
|
||||
./test_aristo/test_portal_proof,
|
||||
./test_aristo/test_short_keys,
|
||||
./test_aristo/[test_balancer, test_helpers, test_samples_xx, test_tx]
|
||||
|
||||
const
|
||||
|
@@ -76,14 +78,12 @@ proc setErrorLevel {.used.} =
|
|||
proc accountsRunner(
|
||||
noisy = true;
|
||||
sample = accSample;
|
||||
resetDb = false;
|
||||
cmpBackends = true;
|
||||
persistent = true;
|
||||
) =
|
||||
let
|
||||
accLst = sample.to(seq[UndumpAccounts]).to(seq[ProofTrieData])
|
||||
fileInfo = sample.file.splitPath.tail.replace(".txt.gz","")
|
||||
listMode = if resetDb: "" else: ", merged dumps"
|
||||
baseDir = getTmpDir() / sample.name & "-accounts"
|
||||
dbDir = if persistent: baseDir / "tmp" else: ""
|
||||
isPersistent = if persistent: "persistent DB" else: "mem DB only"
|
||||
|
@@ -91,10 +91,10 @@ proc accountsRunner(
|
|||
defer:
|
||||
try: baseDir.removeDir except CatchableError: discard
|
||||
|
||||
suite &"Aristo: accounts data dump from {fileInfo}{listMode}, {isPersistent}":
|
||||
suite &"Aristo: accounts data dump from {fileInfo}, {isPersistent}":
|
||||
|
||||
test &"Merge {accLst.len} proof & account lists to database":
|
||||
check noisy.testTxMergeProofAndKvpList(accLst, dbDir, resetDb)
|
||||
check noisy.testMergeProofAndKvpList(accLst, dbDir)
|
||||
|
||||
test &"Delete accounts database successively, {accLst.len} lists":
|
||||
check noisy.testTxMergeAndDeleteOneByOne(accLst, dbDir)
|
||||
|
@@ -109,15 +109,12 @@ proc accountsRunner(
|
|||
proc storagesRunner(
|
||||
noisy = true;
|
||||
sample = storSample;
|
||||
resetDb = false;
|
||||
oops: KnownHasherFailure = @[];
|
||||
cmpBackends = true;
|
||||
persistent = true;
|
||||
) =
|
||||
let
|
||||
stoLst = sample.to(seq[UndumpStorages]).to(seq[ProofTrieData])
|
||||
fileInfo = sample.file.splitPath.tail.replace(".txt.gz","")
|
||||
listMode = if resetDb: "" else: ", merged dumps"
|
||||
baseDir = getTmpDir() / sample.name & "-storage"
|
||||
dbDir = if persistent: baseDir / "tmp" else: ""
|
||||
isPersistent = if persistent: "persistent DB" else: "mem DB only"
|
||||
|
@@ -125,11 +122,10 @@ proc storagesRunner(
|
|||
defer:
|
||||
try: baseDir.removeDir except CatchableError: discard
|
||||
|
||||
suite &"Aristo: storages data dump from {fileInfo}{listMode}, {isPersistent}":
|
||||
suite &"Aristo: storages data dump from {fileInfo}, {isPersistent}":
|
||||
|
||||
test &"Merge {stoLst.len} proof & slots lists to database":
|
||||
check noisy.testTxMergeProofAndKvpList(
|
||||
stoLst, dbDir, resetDb, fileInfo, oops)
|
||||
test &"Merge {stoLst.len} proof & slot lists to database":
|
||||
check noisy.testMergeProofAndKvpList(stoLst, dbDir, fileInfo)
|
||||
|
||||
test &"Delete storage database successively, {stoLst.len} lists":
|
||||
check noisy.testTxMergeAndDeleteOneByOne(stoLst, dbDir)
|
||||
|
|
|
@@ -13,9 +13,8 @@ import
|
|||
eth/common,
|
||||
stew/endians2,
|
||||
../../nimbus/db/aristo/[
|
||||
aristo_debug, aristo_desc, aristo_hike, aristo_merge],
|
||||
../../nimbus/db/kvstore_rocksdb,
|
||||
../../nimbus/sync/protocol/snap/snap_types,
|
||||
aristo_debug, aristo_desc, aristo_hike, aristo_layers, aristo_merge,
|
||||
aristo_tx],
|
||||
../replay/[pp, undump_accounts, undump_storages],
|
||||
./test_samples_xx
|
||||
|
||||
|
@@ -29,6 +28,10 @@ type
|
|||
proof*: seq[Blob]
|
||||
kvpLst*: seq[LeafTiePayload]
|
||||
|
||||
const
|
||||
MaxFilterBulk = 150_000
|
||||
## Policy setting for `schedStow()`
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private helpers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
@@ -201,6 +204,22 @@ func mapRootVid*(
|
|||
# Public functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc schedStow*(
|
||||
db: AristoDbRef; # Database
|
||||
): Result[void,AristoError] =
|
||||
## Context based scheduled persistent/non-persistent storage.
|
||||
let
|
||||
layersMeter = db.nLayersVtx() + db.nLayersKey()
|
||||
filterMeter = if db.balancer.isNil: 0
|
||||
else: db.balancer.sTab.len + db.balancer.kMap.len
|
||||
persistent = MaxFilterBulk < max(layersMeter, filterMeter)
|
||||
if persistent:
|
||||
db.persist()
|
||||
else:
|
||||
db.stow()
|
||||
|
||||
# ------------------
|
||||
|
||||
proc mergeGenericData*(
|
||||
db: AristoDbRef; # Database, top layer
|
||||
leaf: LeafTiePayload; # Leaf item to add to the database
|
||||
|
|
|
@@ -0,0 +1,157 @@
|
|||
# Nimbus
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
# http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or
|
||||
# distributed except according to those terms.
|
||||
|
||||
import
|
||||
eth/common,
|
||||
results,
|
||||
unittest2,
|
||||
../../nimbus/db/opts,
|
||||
../../nimbus/db/core_db/backend/aristo_rocksdb,
|
||||
../../nimbus/db/aristo/[
|
||||
aristo_check,
|
||||
aristo_desc,
|
||||
aristo_init/persistent,
|
||||
aristo_part,
|
||||
aristo_part/part_debug,
|
||||
aristo_tx],
|
||||
../replay/xcheck,
|
||||
./test_helpers
|
||||
|
||||
const
|
||||
testRootVid = VertexID(2)
|
||||
## Need to reconfigure for the test, root ID 1 cannot be deleted as a trie
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private helper
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc innerCleanUp(ps: var PartStateRef) =
|
||||
if not ps.isNil:
|
||||
ps.db.finish(eradicate=true)
|
||||
ps = PartStateRef(nil)
|
||||
|
||||
# -----------------------
|
||||
|
||||
proc saveToBackend(
|
||||
tx: var AristoTxRef;
|
||||
noisy: bool;
|
||||
debugID: int;
|
||||
): bool =
|
||||
var db = tx.to(AristoDbRef)
|
||||
|
||||
# Verify context: nesting level must be 2 (i.e. two transactions)
|
||||
xCheck tx.level == 2
|
||||
|
||||
# Commit and hashify the current layer
|
||||
block:
|
||||
let rc = tx.commit()
|
||||
xCheckRc rc.error == 0
|
||||
|
||||
block:
|
||||
let rc = db.txTop()
|
||||
xCheckRc rc.error == 0
|
||||
tx = rc.value
|
||||
|
||||
# Verify context: nesting level must be 1 (i.e. one transaction)
|
||||
xCheck tx.level == 1
|
||||
|
||||
block:
|
||||
let rc = db.checkBE()
|
||||
xCheckRc rc.error == (0,0)
|
||||
|
||||
# Commit and save to backend
|
||||
block:
|
||||
let rc = tx.commit()
|
||||
xCheckRc rc.error == 0
|
||||
|
||||
block:
|
||||
let rc = db.txTop()
|
||||
xCheckErr rc.value.level < 0 # force error
|
||||
|
||||
block:
|
||||
let rc = db.schedStow()
|
||||
xCheckRc rc.error == 0
|
||||
|
||||
# Update layers to original level
|
||||
tx = db.txBegin().value.to(AristoDbRef).txBegin().value
|
||||
|
||||
true
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public test function
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc testMergeProofAndKvpList*(
|
||||
noisy: bool;
|
||||
list: openArray[ProofTrieData];
|
||||
rdbPath: string; # Rocks DB storage directory
|
||||
idPfx = "";
|
||||
): bool =
|
||||
var
|
||||
ps = PartStateRef(nil)
|
||||
tx = AristoTxRef(nil)
|
||||
rootKey: Hash256
|
||||
defer:
|
||||
if not ps.isNil:
|
||||
ps.db.finish(eradicate=true)
|
||||
|
||||
for n,w in list:
|
||||
|
||||
# Start new database upon request
|
||||
if w.root != rootKey or w.proof.len == 0:
|
||||
ps.innerCleanUp()
|
||||
let db = block:
|
||||
# New DB with disabled filter slots management
|
||||
if 0 < rdbPath.len:
|
||||
let (dbOpts, cfOpts) = DbOptions.init().toRocksDb()
|
||||
let rc = AristoDbRef.init(RdbBackendRef, rdbPath, dbOpts, cfOpts, [])
|
||||
xCheckRc rc.error == 0
|
||||
rc.value()[0]
|
||||
else:
|
||||
AristoDbRef.init(MemBackendRef)
|
||||
ps = PartStateRef.init(db)
|
||||
|
||||
# Start transaction (double frame for testing)
|
||||
tx = ps.db.txBegin().value.to(AristoDbRef).txBegin().value
|
||||
xCheck tx.isTop()
|
||||
|
||||
# Update root
|
||||
rootKey = w.root
|
||||
|
||||
let
|
||||
db = ps.db
|
||||
testId = idPfx & "#" & $w.id & "." & $n
|
||||
|
||||
if 0 < w.proof.len:
|
||||
let rc = ps.partPut(w.proof, ForceGenericPayload)
|
||||
xCheckRc rc.error == 0
|
||||
|
||||
block:
|
||||
let rc = ps.check()
|
||||
xCheckRc rc.error == (0,0)
|
||||
|
||||
for ltp in w.kvpLst:
|
||||
block:
|
||||
let rc = ps.partMergeGenericData(
|
||||
testRootVid, @(ltp.leafTie.path), ltp.payload.rawBlob)
|
||||
xCheckRc rc.error == 0
|
||||
block:
|
||||
let rc = ps.check()
|
||||
xCheckRc rc.error == (0,0)
|
||||
|
||||
block:
|
||||
let saveBeOk = tx.saveToBackend(noisy=noisy, debugID=n)
|
||||
xCheck saveBeOk
|
||||
|
||||
true
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
# ------------------------------------------------------------------------------
|
|
@@ -0,0 +1,232 @@
|
|||
# Nimbus
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
# http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or
|
||||
# distributed except according to those terms.
|
||||
|
||||
{.used.}
|
||||
|
||||
import
|
||||
std/[json, os, sets, strutils, tables],
|
||||
eth/common,
|
||||
stew/byteutils,
|
||||
results,
|
||||
unittest2,
|
||||
../test_helpers,
|
||||
../../nimbus/db/aristo,
|
||||
../../nimbus/db/aristo/[aristo_desc, aristo_get, aristo_hike, aristo_layers,
|
||||
aristo_part],
|
||||
../../nimbus/db/aristo/aristo_part/part_debug
|
||||
|
||||
type
|
||||
ProofData = ref object
|
||||
chain: seq[Blob]
|
||||
error: AristoError
|
||||
hike: Hike
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private helper
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc createPartDb(ps: PartStateRef; data: seq[Blob]; info: static[string]) =
|
||||
# Set up production MPT
|
||||
block:
|
||||
let rc = ps.partPut(data, AutomaticPayload)
|
||||
if rc.isErr: raiseAssert info & ": partPut => " & $rc.error
|
||||
|
||||
# Save keys to database
|
||||
for (rvid,key) in ps.vkPairs:
|
||||
ps.db.layersPutKey(rvid, key)
|
||||
|
||||
# Make sure all is OK
|
||||
block:
|
||||
let rc = ps.check()
|
||||
if rc.isErr: raiseAssert info & ": check => " & $rc.error
|
||||
|
||||
|
||||
proc preLoadAristoDb(jKvp: JsonNode): PartStateRef =
|
||||
const info = "preLoadAristoDb"
|
||||
let ps = PartStateRef.init AristoDbRef.init()
|
||||
|
||||
# Collect rlp-encoded node blobs
|
||||
var proof: seq[Blob]
|
||||
for (k,v) in jKvp.pairs:
|
||||
let
|
||||
key = hexToSeqByte(k)
|
||||
val = hexToSeqByte(v.getStr())
|
||||
if key.len == 32:
|
||||
doAssert key == val.keccakHash.data
|
||||
if val != @[0x80u8]: # Exclude empty item
|
||||
proof.add val
|
||||
|
||||
ps.createPartDb(proof, info)
|
||||
ps
|
||||
|
||||
|
||||
proc collectAddresses(node: JsonNode, collect: var HashSet[EthAddress]) =
|
||||
case node.kind:
|
||||
of JObject:
|
||||
for k,v in node.pairs:
|
||||
if k == "address" and v.kind == JString:
|
||||
collect.incl EthAddress.fromHex v.getStr
|
||||
else:
|
||||
v.collectAddresses collect
|
||||
of JArray:
|
||||
for v in node.items:
|
||||
v.collectAddresses collect
|
||||
else:
|
||||
discard
|
||||
|
||||
|
||||
proc payloadAsBlob(pyl: LeafPayload; ps: PartStateRef): Blob =
|
||||
## Modified function `aristo_serialise.serialise()`.
|
||||
##
|
||||
const info = "payloadAsBlob"
|
||||
case pyl.pType:
|
||||
of RawData:
|
||||
pyl.rawBlob
|
||||
of AccountData:
|
||||
let
|
||||
vid = pyl.stoID
|
||||
key = block:
|
||||
if vid.isValid:
|
||||
let rc = ps.db.getKeyRc (VertexID(1),vid)
|
||||
if rc.isErr:
|
||||
raiseAssert info & ": getKey => " & $rc.error
|
||||
rc.value[0]
|
||||
else:
|
||||
VOID_HASH_KEY
|
||||
|
||||
rlp.encode Account(
|
||||
nonce: pyl.account.nonce,
|
||||
balance: pyl.account.balance,
|
||||
storageRoot: key.to(Hash256),
|
||||
codeHash: pyl.account.codeHash)
|
||||
of StoData:
|
||||
rlp.encode pyl.stoData
|
||||
|
||||
|
||||
func asExtension(b: Blob; path: Hash256): Blob =
|
||||
var node = rlpFromBytes b
|
||||
if node.listLen == 17:
|
||||
let nibble = NibblesBuf.fromBytes(path.data)[0]
|
||||
var wr = initRlpWriter()
|
||||
|
||||
wr.startList(2)
|
||||
wr.append NibblesBuf.fromBytes(@[nibble]).slice(1).toHexPrefix(isleaf=false)
|
||||
wr.append node.listElem(nibble.int).toBytes
|
||||
wr.finish()
|
||||
|
||||
else:
|
||||
b
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private test functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc testCreatePortalProof(node: JsonNode, testStatusIMPL: var TestStatus) =
|
||||
const info = "testCreateProofTwig"
|
||||
|
||||
# Create partial database
|
||||
let ps = node["state"].preLoadAristoDb()
|
||||
|
||||
# Collect addresses from json structure
|
||||
var addresses: HashSet[EthAddress]
|
||||
node.collectAddresses addresses
|
||||
|
||||
# Convert addresses to valid paths (not all addresses might work)
|
||||
var sample: Table[Hash256,ProofData]
|
||||
for a in addresses:
|
||||
let
|
||||
path = a.keccakHash
|
||||
rc = path.hikeUp(VertexID(1), ps.db)
|
||||
sample[path] = ProofData(
|
||||
error: (if rc.isErr: rc.error[1] else: AristoError(0)),
|
||||
hike: rc.to(Hike)) # keep `hike` for potential debugging
|
||||
|
||||
# Verify that there is something to do, at all
|
||||
check 0 < sample.values.toSeq.filterIt(it.error == AristoError 0).len
|
||||
|
||||
# Create proof chains
|
||||
for (path,proof) in sample.pairs:
|
||||
let rc = ps.db.partAccountTwig path
|
||||
check rc.isOk == (proof.error == AristoError 0)
|
||||
if rc.isOk:
|
||||
proof.chain = rc.value
|
||||
|
||||
# Verify proof chains
|
||||
for (path,proof) in sample.pairs:
|
||||
if proof.error == AristoError 0:
|
||||
let
|
||||
rVid = proof.hike.root
|
||||
pyl = proof.hike.legs[^1].wp.vtx.lData.payloadAsBlob(ps)
|
||||
|
||||
block:
|
||||
# Use these root and chain
|
||||
let chain = proof.chain
|
||||
|
||||
# Create another partial database from tree
|
||||
let pq = PartStateRef.init AristoDbRef.init()
|
||||
pq.createPartDb(chain, info)
|
||||
|
||||
# Create the same proof again, which must give the same result as before
|
||||
block:
|
||||
let rc = pq.db.partAccountTwig path
|
||||
check rc.isOk
|
||||
if rc.isOk:
|
||||
check rc.value == proof.chain
|
||||
|
||||
# Verify proof
|
||||
let root = pq.db.getKey((rVid,rVid)).to(Hash256)
|
||||
block:
|
||||
let rc = proof.chain.partUntwig(root, path)
|
||||
check rc.isOk
|
||||
if rc.isOk:
|
||||
check rc.value == pyl
|
||||
|
||||
# Just for completeness (same as above combined into a single function)
|
||||
check proof.chain.partUntwigOk(root, path, pyl).isOk
|
||||
|
||||
# Extension nodes are rare, so one is created and inserted here, and the
# previous test is repeated.
|
||||
block:
|
||||
let
|
||||
ext = proof.chain[0].asExtension(path)
|
||||
tail = @(proof.chain.toOpenArray(1,proof.chain.len-1))
|
||||
chain = @[ext] & tail
|
||||
|
||||
# Create a third partial database from modified proof
|
||||
let pq = PartStateRef.init AristoDbRef.init()
|
||||
pq.createPartDb(chain, info)
|
||||
|
||||
# Re-create proof again
|
||||
block:
|
||||
let rc = pq.db.partAccountTwig path
|
||||
check rc.isOk
|
||||
if rc.isOk:
|
||||
check rc.value == chain
|
||||
|
||||
let root = pq.db.getKey((rVid,rVid)).to(Hash256)
|
||||
block:
|
||||
let rc = chain.partUntwig(root, path)
|
||||
check rc.isOk
|
||||
if rc.isOk:
|
||||
check rc.value == pyl
|
||||
|
||||
check chain.partUntwigOk(root, path, pyl).isOk
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Test
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
suite "Encoding & verification of portal proof twigs for Aristo DB":
|
||||
# Piggyback on tracer test suite environment
|
||||
jsonTest("TracerTests", testCreatePortalProof)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
# ------------------------------------------------------------------------------
|
|
@@ -26,7 +26,6 @@ import
|
|||
aristo_get,
|
||||
aristo_hike,
|
||||
aristo_init/persistent,
|
||||
aristo_layers,
|
||||
aristo_nearby,
|
||||
aristo_part,
|
||||
aristo_part/part_debug,
|
||||
|
@@ -42,9 +41,6 @@ type
|
|||
## (<sample-name> & "#" <instance>, (<vertex-id>,<error-symbol>))
|
||||
|
||||
const
|
||||
MaxFilterBulk = 150_000
|
||||
## Policy settig for `pack()`
|
||||
|
||||
testRootVid = VertexID(2)
|
||||
## Need to reconfigure for the test, root ID 1 cannot be deleted as a trie
|
||||
|
||||
|
@@ -121,26 +117,7 @@ proc innerCleanUp(db: var AristoDbRef): bool {.discardable.} =
|
|||
db = AristoDbRef(nil)
|
||||
true
|
||||
|
||||
proc innerCleanUp(ps: var PartStateRef): bool {.discardable.} =
|
||||
if not ps.isNil:
|
||||
if not ps.db.innerCleanUp():
|
||||
return false
|
||||
ps = PartStateRef(nil)
|
||||
true
|
||||
|
||||
proc schedStow(
|
||||
db: AristoDbRef; # Database
|
||||
): Result[void,AristoError] =
|
||||
## Scheduled storage
|
||||
let
|
||||
layersMeter = db.nLayersVtx() + db.nLayersKey()
|
||||
filterMeter = if db.balancer.isNil: 0
|
||||
else: db.balancer.sTab.len + db.balancer.kMap.len
|
||||
persistent = MaxFilterBulk < max(layersMeter, filterMeter)
|
||||
if persistent:
|
||||
db.persist()
|
||||
else:
|
||||
db.stow()
|
||||
# --------------------------------
|
||||
|
||||
proc saveToBackend(
|
||||
tx: var AristoTxRef;
|
||||
|
@@ -197,52 +174,6 @@ proc saveToBackend(
|
|||
|
||||
true
|
||||
|
||||
proc saveToBackendWithOops(
|
||||
tx: var AristoTxRef;
|
||||
noisy: bool;
|
||||
debugID: int;
|
||||
oops: (int,AristoError);
|
||||
): bool =
|
||||
var db = tx.to(AristoDbRef)
|
||||
|
||||
# Verify context: nesting level must be 2 (i.e. two transactions)
|
||||
xCheck tx.level == 2
|
||||
|
||||
# Commit and hashify the current layer
|
||||
block:
|
||||
let rc = tx.commit()
|
||||
xCheckRc rc.error == 0
|
||||
|
||||
block:
|
||||
let rc = db.txTop()
|
||||
xCheckRc rc.error == 0
|
||||
tx = rc.value
|
||||
|
||||
# Verify context: nesting level must be 1 (i.e. one transaction)
|
||||
xCheck tx.level == 1
|
||||
|
||||
# Commit and save to backend
|
||||
block:
|
||||
let rc = tx.commit()
|
||||
xCheckRc rc.error == 0
|
||||
|
||||
block:
|
||||
let rc = db.txTop()
|
||||
xCheckErr rc.value.level < 0 # force error
|
||||
|
||||
block:
|
||||
let rc = db.schedStow()
|
||||
xCheckRc rc.error == 0:
|
||||
noisy.say "***", "saveToBackendWithOops(8)",
|
||||
" debugID=", debugID,
|
||||
"\n db\n ", db.pp(backendOk=true),
|
||||
""
|
||||
|
||||
# Update layers to original level
|
||||
tx = db.txBegin().value.to(AristoDbRef).txBegin().value
|
||||
|
||||
true
|
||||
|
||||
|
||||
proc fwdWalkVerify(
|
||||
db: AristoDbRef;
|
||||
|
@@ -500,114 +431,6 @@ proc testTxMergeAndDeleteSubTree*(
|
|||
|
||||
true
|
||||
|
||||
|
||||
proc testTxMergeProofAndKvpList*(
|
||||
noisy: bool;
|
||||
list: openArray[ProofTrieData];
|
||||
rdbPath: string; # Rocks DB storage directory
|
||||
resetDb = false;
|
||||
idPfx = "";
|
||||
oops: KnownHasherFailure = @[];
|
||||
): bool =
|
||||
let
|
||||
oopsTab = oops.toTable
|
||||
var
|
||||
ps = PartStateRef(nil)
|
||||
tx = AristoTxRef(nil)
|
||||
rootKey: Hash256
|
||||
count = 0
|
||||
defer:
|
||||
if not ps.isNil:
|
||||
ps.db.finish(eradicate=true)
|
||||
|
||||
for n,w in list:
|
||||
|
||||
# Start new database upon request
|
||||
if resetDb or w.root != rootKey or w.proof.len == 0:
|
||||
ps.innerCleanUp()
|
||||
let db = block:
|
||||
# New DB with disabled filter slots management
|
||||
if 0 < rdbPath.len:
|
||||
let (dbOpts, cfOpts) = DbOptions.init().toRocksDb()
|
||||
let rc = AristoDbRef.init(RdbBackendRef, rdbPath, dbOpts, cfOpts, [])
|
||||
xCheckRc rc.error == 0
|
||||
rc.value()[0]
|
||||
else:
|
||||
AristoDbRef.init(MemBackendRef)
|
||||
ps = PartStateRef.init(db)
|
||||
|
||||
# Start transaction (double frame for testing)
|
||||
tx = ps.db.txBegin().value.to(AristoDbRef).txBegin().value
|
||||
xCheck tx.isTop()
|
||||
|
||||
# Update root
|
||||
rootKey = w.root
|
||||
count = 0
|
||||
count.inc
|
||||
|
||||
let
|
||||
db = ps.db
|
||||
testId = idPfx & "#" & $w.id & "." & $n
|
||||
runID = n
|
||||
sTabLen = db.nLayersVtx()
|
||||
leafs = w.kvpLst.mapRootVid testRootVid # merge into main trie
|
||||
|
||||
if 0 < w.proof.len:
|
||||
let rc = ps.partPut(w.proof, ForceGenericPayload)
|
||||
xCheckRc rc.error == 0:
|
||||
noisy.say "***", "testTxMergeProofAndKvpList (5)",
|
||||
" <", n, "/", list.len-1, ">",
|
||||
" runID=", runID,
|
||||
" nGroup=", count,
|
||||
" error=", rc.error,
|
||||
" nProof=", w.proof.len,
|
||||
"\n ps\n \n", ps.pp(),
|
||||
""
|
||||
block:
|
||||
let rc = ps.check()
|
||||
xCheckRc rc.error == (0,0)
|
||||
|
||||
when true and false:
|
||||
noisy.say "***", "testTxMergeProofAndKvpList (6)",
|
||||
" <", n, "/", list.len-1, ">",
|
||||
" runID=", runID,
|
||||
" nGroup=", count,
|
||||
" nProof=", w.proof.len,
|
||||
#"\n ps\n \n", ps.pp(),
|
||||
""
|
||||
|
||||
for ltp in leafs:
|
||||
block:
|
||||
let rc = ps.partMergeGenericData(
|
||||
ltp.leafTie.root, @(ltp.leafTie.path), ltp.payload.rawBlob)
|
||||
xCheckRc rc.error == 0
|
||||
block:
|
||||
let rc = ps.check()
|
||||
xCheckRc rc.error == (0,0)
|
||||
|
||||
when true and false:
|
||||
noisy.say "***", "testTxMergeProofAndKvpList (7)",
|
||||
" <", n, "/", list.len-1, ">",
|
||||
" runID=", runID,
|
||||
" nGroup=", count,
|
||||
" nProof=", w.proof.len,
|
||||
#"\n ps\n \n", ps.pp(),
|
||||
""
|
||||
|
||||
block:
|
||||
let
|
||||
oops = oopsTab.getOrDefault(testId,(0,AristoError(0)))
|
||||
saveBeOk = tx.saveToBackendWithOops(noisy=noisy, debugID=runID, oops)
|
||||
xCheck saveBeOk
|
||||
|
||||
when true and false:
|
||||
noisy.say "***", "testTxMergeProofAndKvpList (9)",
|
||||
" <", n, "/", list.len-1, ">",
|
||||
" runID=", runID,
|
||||
" nGroup=", count, " merged=", merged
|
||||
|
||||
true
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
# ------------------------------------------------------------------------------
|
||||
|
|