mirror of
https://github.com/status-im/nimbus-eth1.git
synced 2025-02-24 17:58:30 +00:00
Remove RawData
from possible leaf payload types (#2794)
This kind of data is not used except in tests where it is used only to create databases that don't match actual usage of aristo. Removing simplifies future optimizations that can focus on processing specific leaf types more efficiently. A casualty of this removal is some test code as well as some proof generation code that is unused - on the surface, it looks like it should be possible to port both of these to the more specific data types - doing so would ensure that a database written by one part of the codebase can interact with the other - as it stands, there is confusion on this point since using the proof generation code will result in a database of a shape that is incompatible with the rest of eth1.
This commit is contained in:
parent
a5541a5a4f
commit
58cde36656
@ -2,16 +2,16 @@ TracerTests
|
||||
===
|
||||
## TracerTests
|
||||
```diff
|
||||
+ block46147.json OK
|
||||
+ block46400.json OK
|
||||
+ block46402.json OK
|
||||
+ block47205.json OK
|
||||
+ block48712.json OK
|
||||
+ block48915.json OK
|
||||
+ block49018.json OK
|
||||
+ block97.json OK
|
||||
block46147.json Skip
|
||||
block46400.json Skip
|
||||
block46402.json Skip
|
||||
block47205.json Skip
|
||||
block48712.json Skip
|
||||
block48915.json Skip
|
||||
block49018.json Skip
|
||||
block97.json Skip
|
||||
```
|
||||
OK: 8/8 Fail: 0/8 Skip: 0/8
|
||||
OK: 0/8 Fail: 0/8 Skip: 8/8
|
||||
|
||||
---TOTAL---
|
||||
OK: 8/8 Fail: 0/8 Skip: 0/8
|
||||
OK: 0/8 Fail: 0/8 Skip: 8/8
|
||||
|
@ -32,7 +32,6 @@ export
|
||||
leftPairs, # iterators
|
||||
rightPairs,
|
||||
rightPairsAccount,
|
||||
rightPairsGeneric,
|
||||
rightPairsStorage
|
||||
|
||||
import
|
||||
|
@ -124,10 +124,6 @@ proc load256(data: openArray[byte]; start: var int, len: int): Result[UInt256,Ar
|
||||
|
||||
proc blobifyTo*(pyl: LeafPayload, data: var seq[byte]) =
|
||||
case pyl.pType
|
||||
of RawData:
|
||||
data &= pyl.rawBlob
|
||||
data &= [0x10.byte]
|
||||
|
||||
of AccountData:
|
||||
# `lens` holds `len-1` since `mask` filters out the zero-length case (which
|
||||
# allows saving 1 bit per length)
|
||||
@ -248,45 +244,42 @@ proc deblobify(
|
||||
pyl: var LeafPayload;
|
||||
): Result[void,AristoError] =
|
||||
if data.len == 0:
|
||||
pyl = LeafPayload(pType: RawData)
|
||||
return ok()
|
||||
return err(DeblobVtxTooShort)
|
||||
|
||||
let mask = data[^1]
|
||||
if (mask and 0x10) > 0: # unstructured payload
|
||||
pyl = LeafPayload(pType: RawData, rawBlob: data[0 .. ^2])
|
||||
return ok()
|
||||
|
||||
if (mask and 0x20) > 0: # Slot storage data
|
||||
pyl = LeafPayload(
|
||||
pType: StoData,
|
||||
stoData: ?deblobify(data.toOpenArray(0, data.len - 2), UInt256))
|
||||
return ok()
|
||||
ok()
|
||||
elif (mask and 0xf0) == 0: # Only account fields set
|
||||
pyl = LeafPayload(pType: AccountData)
|
||||
var
|
||||
start = 0
|
||||
lens = uint16.fromBytesBE(data.toOpenArray(data.len - 3, data.len - 2))
|
||||
|
||||
pyl = LeafPayload(pType: AccountData)
|
||||
var
|
||||
start = 0
|
||||
lens = uint16.fromBytesBE(data.toOpenArray(data.len - 3, data.len - 2))
|
||||
if (mask and 0x01) > 0:
|
||||
let len = lens and 0b111
|
||||
pyl.account.nonce = ? load64(data, start, int(len + 1))
|
||||
|
||||
if (mask and 0x01) > 0:
|
||||
let len = lens and 0b111
|
||||
pyl.account.nonce = ? load64(data, start, int(len + 1))
|
||||
if (mask and 0x02) > 0:
|
||||
let len = (lens shr 3) and 0b11111
|
||||
pyl.account.balance = ? load256(data, start, int(len + 1))
|
||||
|
||||
if (mask and 0x02) > 0:
|
||||
let len = (lens shr 3) and 0b11111
|
||||
pyl.account.balance = ? load256(data, start, int(len + 1))
|
||||
if (mask and 0x04) > 0:
|
||||
let len = (lens shr 8) and 0b111
|
||||
pyl.stoID = (true, VertexID(? load64(data, start, int(len + 1))))
|
||||
|
||||
if (mask and 0x04) > 0:
|
||||
let len = (lens shr 8) and 0b111
|
||||
pyl.stoID = (true, VertexID(? load64(data, start, int(len + 1))))
|
||||
if (mask and 0x08) > 0:
|
||||
if data.len() < start + 32:
|
||||
return err(DeblobCodeLenUnsupported)
|
||||
discard pyl.account.codeHash.data.copyFrom(data.toOpenArray(start, start + 31))
|
||||
else:
|
||||
pyl.account.codeHash = EMPTY_CODE_HASH
|
||||
|
||||
if (mask and 0x08) > 0:
|
||||
if data.len() < start + 32:
|
||||
return err(DeblobCodeLenUnsupported)
|
||||
discard pyl.account.codeHash.data.copyFrom(data.toOpenArray(start, start + 31))
|
||||
ok()
|
||||
else:
|
||||
pyl.account.codeHash = EMPTY_CODE_HASH
|
||||
|
||||
ok()
|
||||
err(DeblobUnknown)
|
||||
|
||||
proc deblobifyType*(record: openArray[byte]; T: type VertexRef):
|
||||
Result[VertexType, AristoError] =
|
||||
|
@ -249,8 +249,6 @@ proc computeKeyImpl(
|
||||
storageRoot: skey.to(Hash32),
|
||||
codeHash: vtx.lData.account.codeHash,
|
||||
)
|
||||
of RawData:
|
||||
vtx.lData.rawBlob
|
||||
of StoData:
|
||||
# TODO avoid memory allocation when encoding storage data
|
||||
rlp.encode(vtx.lData.stoData)
|
||||
@ -371,8 +369,6 @@ proc computeLeafKeysImpl(
|
||||
codeHash: vtx.lData.account.codeHash,
|
||||
)
|
||||
writer2.finish()
|
||||
of RawData:
|
||||
vtx.lData.rawBlob
|
||||
of StoData:
|
||||
writer2.clear()
|
||||
writer2.append(vtx.lData.stoData)
|
||||
|
@ -180,8 +180,6 @@ func ppAriAccount(a: AristoAccount): string =
|
||||
|
||||
func ppPayload(p: LeafPayload, db: AristoDbRef): string =
|
||||
case p.pType:
|
||||
of RawData:
|
||||
result &= p.rawBlob.toHex.squeeze(hex=true)
|
||||
of AccountData:
|
||||
result = "(" & p.account.ppAriAccount() & "," & p.stoID.ppVid & ")"
|
||||
of StoData:
|
||||
|
@ -43,7 +43,6 @@ type
|
||||
|
||||
PayloadType* = enum
|
||||
## Type of leaf data.
|
||||
RawData ## Generic data
|
||||
AccountData ## `Aristo account` with vertex IDs links
|
||||
StoData ## Slot storage data
|
||||
|
||||
@ -58,10 +57,7 @@ type
|
||||
LeafPayload* = object
|
||||
## The payload type depends on the sub-tree used. The `VertexID(1)` rooted
|
||||
## sub-tree only has `AccountData` type payload, stoID-based have StoData
|
||||
## while generic have RawData
|
||||
case pType*: PayloadType
|
||||
of RawData:
|
||||
rawBlob*: seq[byte] ## Opaque data, default value
|
||||
of AccountData:
|
||||
account*: AristoAccount
|
||||
stoID*: StorageID ## Storage vertex ID (if any)
|
||||
@ -157,9 +153,6 @@ proc `==`*(a, b: LeafPayload): bool =
|
||||
if a.pType != b.pType:
|
||||
return false
|
||||
case a.pType:
|
||||
of RawData:
|
||||
if a.rawBlob != b.rawBlob:
|
||||
return false
|
||||
of AccountData:
|
||||
if a.account != b.account or
|
||||
a.stoID != b.stoID:
|
||||
@ -208,10 +201,6 @@ proc `==`*(a, b: NodeRef): bool =
|
||||
func dup*(pld: LeafPayload): LeafPayload =
|
||||
## Duplicate payload.
|
||||
case pld.pType:
|
||||
of RawData:
|
||||
LeafPayload(
|
||||
pType: RawData,
|
||||
rawBlob: pld.rawBlob)
|
||||
of AccountData:
|
||||
LeafPayload(
|
||||
pType: AccountData,
|
||||
|
@ -23,18 +23,6 @@ import
|
||||
# Private functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
func mustBeGeneric(
|
||||
root: VertexID;
|
||||
): Result[void,AristoError] =
|
||||
## Verify that `root` is neither from an accounts tree nor a strorage tree.
|
||||
if not root.isValid:
|
||||
return err(FetchRootVidMissing)
|
||||
elif root == VertexID(1):
|
||||
return err(FetchAccRootNotAccepted)
|
||||
elif LEAST_FREE_VID <= root.distinctBase:
|
||||
return err(FetchStoRootNotAccepted)
|
||||
ok()
|
||||
|
||||
proc retrieveLeaf(
|
||||
db: AristoDbRef;
|
||||
root: VertexID;
|
||||
@ -260,38 +248,6 @@ proc hasPathAccount*(
|
||||
##
|
||||
db.hasAccountPayload(accPath)
|
||||
|
||||
proc fetchGenericData*(
|
||||
db: AristoDbRef;
|
||||
root: VertexID;
|
||||
path: openArray[byte];
|
||||
): Result[seq[byte],AristoError] =
|
||||
## For a generic sub-tree starting at `root`, fetch the data record
|
||||
## indexed by `path`.
|
||||
##
|
||||
? root.mustBeGeneric()
|
||||
let pyl = ? db.retrieveLeaf(root, path)
|
||||
assert pyl.lData.pType == RawData # debugging only
|
||||
ok pyl.lData.rawBlob
|
||||
|
||||
proc fetchGenericState*(
|
||||
db: AristoDbRef;
|
||||
root: VertexID;
|
||||
updateOk: bool;
|
||||
): Result[Hash32,AristoError] =
|
||||
## Fetch the Merkle hash of the argument `root`.
|
||||
db.retrieveMerkleHash(root, updateOk)
|
||||
|
||||
proc hasPathGeneric*(
|
||||
db: AristoDbRef;
|
||||
root: VertexID;
|
||||
path: openArray[byte];
|
||||
): Result[bool,AristoError] =
|
||||
## For a generic sub-tree starting at `root` and indexed by `path`, query
|
||||
## whether this record exists on the database.
|
||||
##
|
||||
? root.mustBeGeneric()
|
||||
db.hasPayload(root, path)
|
||||
|
||||
proc fetchStorageData*(
|
||||
db: AristoDbRef;
|
||||
accPath: Hash32;
|
||||
|
@ -202,37 +202,6 @@ proc mergeAccountRecord*(
|
||||
|
||||
ok true
|
||||
|
||||
proc mergeGenericData*(
|
||||
db: AristoDbRef; # Database, top layer
|
||||
root: VertexID; # MPT state root
|
||||
path: openArray[byte]; # Leaf item to add to the database
|
||||
data: openArray[byte]; # Raw data payload value
|
||||
): Result[bool,AristoError] =
|
||||
## Variant of `mergeXXX()` for generic sub-trees, i.e. for arguments
|
||||
## `root` greater than `VertexID(1)` and smaller than `LEAST_FREE_VID`.
|
||||
##
|
||||
## On success, the function returns `true` if the `data` argument was merged
|
||||
## into the database ot updated, and `false` if it was on the database
|
||||
## already.
|
||||
##
|
||||
# Verify that `root` is neither an accounts tree nor a strorage tree.
|
||||
if not root.isValid:
|
||||
return err(MergeRootVidMissing)
|
||||
elif root == VertexID(1):
|
||||
return err(MergeAccRootNotAccepted)
|
||||
elif LEAST_FREE_VID <= root.distinctBase:
|
||||
return err(MergeStoRootNotAccepted)
|
||||
|
||||
let
|
||||
pyl = LeafPayload(pType: RawData, rawBlob: @data)
|
||||
|
||||
discard db.mergePayloadImpl(root, path, Opt.none(VertexRef), pyl).valueOr:
|
||||
if error == MergeNoAction:
|
||||
return ok false
|
||||
return err error
|
||||
|
||||
ok true
|
||||
|
||||
proc mergeStorageData*(
|
||||
db: AristoDbRef; # Database, top layer
|
||||
accPath: Hash32; # Needed for accounts payload
|
||||
|
@ -439,17 +439,6 @@ iterator rightPairsAccount*(
|
||||
for (lty,pyl) in db.rightPairs LeafTie(root: VertexID(1), path: start):
|
||||
yield (lty.path, pyl.account)
|
||||
|
||||
iterator rightPairsGeneric*(
|
||||
db: AristoDbRef; # Database layer
|
||||
root: VertexID; # Generic root (different from VertexID)
|
||||
start = low(PathID); # Before or at first value
|
||||
): (PathID,seq[byte]) =
|
||||
## Variant of `rightPairs()` for a generic tree
|
||||
# Verify that `root` is neither from an accounts tree nor a strorage tree.
|
||||
if VertexID(1) < root and root.distinctBase < LEAST_FREE_VID:
|
||||
for (lty,pyl) in db.rightPairs LeafTie(root: VertexID(1), path: start):
|
||||
yield (lty.path, pyl.rawBlob)
|
||||
|
||||
iterator rightPairsStorage*(
|
||||
db: AristoDbRef; # Database layer
|
||||
accPath: Hash32; # Account the storage data belong to
|
||||
|
@ -366,37 +366,6 @@ proc partReRoot*(
|
||||
# Public merge functions on partial tree database
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc partMergeGenericData*(
|
||||
ps: PartStateRef;
|
||||
root: VertexID; # MPT state root
|
||||
path: openArray[byte]; # Leaf item to add to the database
|
||||
data: openArray[byte]; # Raw data payload value
|
||||
): Result[bool,AristoError] =
|
||||
## ..
|
||||
let mergeError = block:
|
||||
# Opportunistically try whether it just works
|
||||
let rc = ps.db.mergeGenericData(root, path, data)
|
||||
if rc.isOk or rc.error != GetVtxNotFound:
|
||||
return rc
|
||||
rc.error
|
||||
|
||||
# Otherwise clean the way removing blind link and retry
|
||||
let
|
||||
ctx = ps.ctxMergeBegin(root, path).valueOr:
|
||||
let ctxErr = if error == PartCtxNotAvailable: mergeError else: error
|
||||
return err(ctxErr)
|
||||
rc = ps.db.mergeGenericData(root, path, data)
|
||||
|
||||
# Evaluate result => commit/rollback
|
||||
if rc.isErr:
|
||||
? ctx.ctxMergeRollback()
|
||||
return rc
|
||||
if not ? ctx.ctxMergeCommit():
|
||||
return err(PartVtxSlotWasNotModified)
|
||||
|
||||
ok(rc.value)
|
||||
|
||||
|
||||
proc partMergeAccountRecord*(
|
||||
ps: PartStateRef;
|
||||
accPath: Hash32; # Even nibbled byte path
|
||||
|
@ -64,14 +64,15 @@ proc read(rlp: var Rlp; T: type PrfNode): T {.gcsafe, raises: [RlpError].} =
|
||||
let (isLeaf, pathSegment) = NibblesBuf.fromHexPrefix blobs[0]
|
||||
if isLeaf:
|
||||
return PrfNode(
|
||||
prfType: ignore,
|
||||
prfType: ignore, )
|
||||
|
||||
vtx: VertexRef(
|
||||
vType: Leaf,
|
||||
pfx: pathSegment,
|
||||
lData: LeafPayload(
|
||||
pType: RawData,
|
||||
rawBlob: blobs[1])))
|
||||
# TODO interpret the blob (?)
|
||||
# vtx: VertexRef(
|
||||
# vType: Leaf,
|
||||
# pfx: pathSegment,
|
||||
# lData: LeafPayload(
|
||||
# pType: RawData,
|
||||
# rawBlob: blobs[1])))
|
||||
else:
|
||||
var node = PrfNode(
|
||||
prfType: isExtension,
|
||||
@ -145,7 +146,9 @@ func toNodesTab*(
|
||||
# Decode payload to deficated format for storage or accounts
|
||||
var pyl: PrfPayload
|
||||
try:
|
||||
pyl = rlp.decode(nd.vtx.lData.rawBlob, PrfPayload)
|
||||
# TODO interpret the blob
|
||||
# pyl = rlp.decode(nd.vtx.lData.rawBlob, PrfPayload)
|
||||
pyl = PrfPayload(prfType: isError, error: PartRlpPayloadException)
|
||||
except RlpError:
|
||||
pyl = PrfPayload(prfType: isError, error: PartRlpPayloadException)
|
||||
|
||||
|
@ -34,8 +34,6 @@ proc serialise(
|
||||
## of account type, otherwise pass the data as is.
|
||||
##
|
||||
case pyl.pType:
|
||||
of RawData:
|
||||
ok pyl.rawBlob
|
||||
of AccountData:
|
||||
let key = block:
|
||||
if pyl.stoID.isValid:
|
||||
|
@ -15,7 +15,6 @@ import unittest2, ../../nimbus/db/aristo/aristo_blobify
|
||||
suite "Aristo blobify":
|
||||
test "VertexRef roundtrip":
|
||||
let
|
||||
leafRawData = VertexRef(vType: Leaf, lData: LeafPayload(pType: RawData))
|
||||
leafAccount = VertexRef(vType: Leaf, lData: LeafPayload(pType: AccountData))
|
||||
leafStoData =
|
||||
VertexRef(vType: Leaf, lData: LeafPayload(pType: StoData, stoData: 42.u256))
|
||||
@ -65,7 +64,6 @@ suite "Aristo blobify":
|
||||
)
|
||||
|
||||
check:
|
||||
deblobify(blobify(leafRawData), VertexRef)[] == leafRawData
|
||||
deblobify(blobify(leafAccount), VertexRef)[] == leafAccount
|
||||
deblobify(blobify(leafStoData), VertexRef)[] == leafStoData
|
||||
deblobify(blobify(branch), VertexRef)[] == branch
|
||||
|
@ -160,15 +160,16 @@ func to*(ua: seq[UndumpAccounts]; T: type seq[ProofTrieData]): T =
|
||||
let thisRoot = w.root
|
||||
if rootKey != thisRoot:
|
||||
(rootKey, rootVid) = (thisRoot, VertexID(rootVid.uint64 + 1))
|
||||
if 0 < w.data.accounts.len:
|
||||
result.add ProofTrieData(
|
||||
root: rootKey,
|
||||
proof: cast[seq[seq[byte]]](w.data.proof),
|
||||
kvpLst: w.data.accounts.mapIt(LeafTiePayload(
|
||||
leafTie: LeafTie(
|
||||
root: rootVid,
|
||||
path: it.accKey.to(PathID)),
|
||||
payload: LeafPayload(pType: RawData, rawBlob: it.accBlob))))
|
||||
# TODO rewrite as account leaves
|
||||
# if 0 < w.data.accounts.len:
|
||||
# result.add ProofTrieData(
|
||||
# root: rootKey,
|
||||
# proof: cast[seq[seq[byte]]](w.data.proof),
|
||||
# kvpLst: w.data.accounts.mapIt(LeafTiePayload(
|
||||
# leafTie: LeafTie(
|
||||
# root: rootVid,
|
||||
# path: it.accKey.to(PathID)),
|
||||
# payload: LeafPayload(pType: RawData, rawBlob: it.accBlob))))
|
||||
|
||||
func to*(us: seq[UndumpStorages]; T: type seq[ProofTrieData]): T =
|
||||
var (rootKey, rootVid) = (default(Hash32), VertexID(0))
|
||||
@ -177,15 +178,17 @@ func to*(us: seq[UndumpStorages]; T: type seq[ProofTrieData]): T =
|
||||
let thisRoot = w.account.storageRoot
|
||||
if rootKey != thisRoot:
|
||||
(rootKey, rootVid) = (thisRoot, VertexID(rootVid.uint64 + 1))
|
||||
if 0 < w.data.len:
|
||||
result.add ProofTrieData(
|
||||
root: thisRoot,
|
||||
id: n + 1,
|
||||
kvpLst: w.data.mapIt(LeafTiePayload(
|
||||
leafTie: LeafTie(
|
||||
root: rootVid,
|
||||
path: it.slotHash.to(PathID)),
|
||||
payload: LeafPayload(pType: RawData, rawBlob: it.slotData))))
|
||||
# TODO rewrite as account leaves
|
||||
|
||||
# if 0 < w.data.len:
|
||||
# result.add ProofTrieData(
|
||||
# root: thisRoot,
|
||||
# id: n + 1,
|
||||
# kvpLst: w.data.mapIt(LeafTiePayload(
|
||||
# leafTie: LeafTie(
|
||||
# root: rootVid,
|
||||
# path: it.slotHash.to(PathID)),
|
||||
# payload: LeafPayload(pType: RawData, rawBlob: it.slotData))))
|
||||
if 0 < result.len:
|
||||
result[^1].proof = cast[seq[seq[byte]]](s.data.proof)
|
||||
|
||||
@ -217,14 +220,6 @@ proc schedStow*(
|
||||
|
||||
# ------------------
|
||||
|
||||
proc mergeGenericData*(
|
||||
db: AristoDbRef; # Database, top layer
|
||||
leaf: LeafTiePayload; # Leaf item to add to the database
|
||||
): Result[bool,AristoError] =
|
||||
## Variant of `mergeGenericData()`.
|
||||
db.mergeGenericData(
|
||||
leaf.leafTie.root, @(leaf.leafTie.path), leaf.payload.rawBlob)
|
||||
|
||||
proc mergeList*(
|
||||
db: AristoDbRef; # Database, top layer
|
||||
leafs: openArray[LeafTiePayload]; # Leaf items to add to the database
|
||||
@ -235,17 +230,18 @@ proc mergeList*(
|
||||
for n,w in leafs:
|
||||
noisy.say "*** mergeList",
|
||||
" n=", n, "/", leafs.len
|
||||
let rc = db.mergeGenericData w
|
||||
noisy.say "*** mergeList",
|
||||
" n=", n, "/", leafs.len,
|
||||
" rc=", (if rc.isOk: "ok" else: $rc.error),
|
||||
"\n -------------\n"
|
||||
if rc.isErr:
|
||||
return (n,dups,rc.error)
|
||||
elif rc.value:
|
||||
merged.inc
|
||||
else:
|
||||
dups.inc
|
||||
# TODO refactor to not use generic data
|
||||
# let rc = db.mergeGenericData w
|
||||
# noisy.say "*** mergeList",
|
||||
# " n=", n, "/", leafs.len,
|
||||
# " rc=", (if rc.isOk: "ok" else: $rc.error),
|
||||
# "\n -------------\n"
|
||||
# if rc.isErr:
|
||||
# return (n,dups,rc.error)
|
||||
# elif rc.value:
|
||||
# merged.inc
|
||||
# else:
|
||||
# dups.inc
|
||||
|
||||
(merged, dups, AristoError(0))
|
||||
|
||||
|
@ -93,58 +93,59 @@ proc testMergeProofAndKvpList*(
|
||||
list: openArray[ProofTrieData];
|
||||
rdbPath: string; # Rocks DB storage directory
|
||||
idPfx = "";
|
||||
): bool =
|
||||
var
|
||||
ps = PartStateRef(nil)
|
||||
tx = AristoTxRef(nil)
|
||||
rootKey: Hash32
|
||||
defer:
|
||||
if not ps.isNil:
|
||||
ps.db.finish(eradicate=true)
|
||||
): bool {.deprecated.} =
|
||||
# TODO update for non-generic data
|
||||
# var
|
||||
# ps = PartStateRef(nil)
|
||||
# tx = AristoTxRef(nil)
|
||||
# rootKey: Hash32
|
||||
# defer:
|
||||
# if not ps.isNil:
|
||||
# ps.db.finish(eradicate=true)
|
||||
|
||||
for n,w in list:
|
||||
# for n,w in list:
|
||||
|
||||
# Start new database upon request
|
||||
if w.root != rootKey or w.proof.len == 0:
|
||||
ps.innerCleanUp()
|
||||
let db = block:
|
||||
# New DB with disabled filter slots management
|
||||
if 0 < rdbPath.len:
|
||||
let (dbOpts, cfOpts) = DbOptions.init().toRocksDb()
|
||||
let rc = AristoDbRef.init(RdbBackendRef, rdbPath, DbOptions.init(), dbOpts, cfOpts, [])
|
||||
xCheckRc rc.error == 0
|
||||
rc.value()[0]
|
||||
else:
|
||||
AristoDbRef.init(MemBackendRef)
|
||||
ps = PartStateRef.init(db)
|
||||
# # Start new database upon request
|
||||
# if w.root != rootKey or w.proof.len == 0:
|
||||
# ps.innerCleanUp()
|
||||
# let db = block:
|
||||
# # New DB with disabled filter slots management
|
||||
# if 0 < rdbPath.len:
|
||||
# let (dbOpts, cfOpts) = DbOptions.init().toRocksDb()
|
||||
# let rc = AristoDbRef.init(RdbBackendRef, rdbPath, DbOptions.init(), dbOpts, cfOpts, [])
|
||||
# xCheckRc rc.error == 0
|
||||
# rc.value()[0]
|
||||
# else:
|
||||
# AristoDbRef.init(MemBackendRef)
|
||||
# ps = PartStateRef.init(db)
|
||||
|
||||
# Start transaction (double frame for testing)
|
||||
tx = ps.db.txBegin().value.to(AristoDbRef).txBegin().value
|
||||
xCheck tx.isTop()
|
||||
# # Start transaction (double frame for testing)
|
||||
# tx = ps.db.txBegin().value.to(AristoDbRef).txBegin().value
|
||||
# xCheck tx.isTop()
|
||||
|
||||
# Update root
|
||||
rootKey = w.root
|
||||
# # Update root
|
||||
# rootKey = w.root
|
||||
|
||||
if 0 < w.proof.len:
|
||||
let rc = ps.partPut(w.proof, ForceGenericPayload)
|
||||
xCheckRc rc.error == 0
|
||||
# if 0 < w.proof.len:
|
||||
# let rc = ps.partPut(w.proof, ForceGenericPayload)
|
||||
# xCheckRc rc.error == 0
|
||||
|
||||
block:
|
||||
let rc = ps.check()
|
||||
xCheckRc rc.error == (0,0)
|
||||
# block:
|
||||
# let rc = ps.check()
|
||||
# xCheckRc rc.error == (0,0)
|
||||
|
||||
for ltp in w.kvpLst:
|
||||
block:
|
||||
let rc = ps.partMergeGenericData(
|
||||
testRootVid, @(ltp.leafTie.path), ltp.payload.rawBlob)
|
||||
xCheckRc rc.error == 0
|
||||
block:
|
||||
let rc = ps.check()
|
||||
xCheckRc rc.error == (0,0)
|
||||
# for ltp in w.kvpLst:
|
||||
# block:
|
||||
# let rc = ps.partMergeGenericData(
|
||||
# testRootVid, @(ltp.leafTie.path), ltp.payload.rawBlob)
|
||||
# xCheckRc rc.error == 0
|
||||
# block:
|
||||
# let rc = ps.check()
|
||||
# xCheckRc rc.error == (0,0)
|
||||
|
||||
block:
|
||||
let saveBeOk = tx.saveToBackend(noisy=noisy, debugID=n)
|
||||
xCheck saveBeOk
|
||||
# block:
|
||||
# let saveBeOk = tx.saveToBackend(noisy=noisy, debugID=n)
|
||||
# xCheck saveBeOk
|
||||
|
||||
true
|
||||
|
||||
|
@ -88,8 +88,6 @@ proc payloadAsBlob(pyl: LeafPayload; ps: PartStateRef): seq[byte] =
|
||||
##
|
||||
const info = "payloadAsBlob"
|
||||
case pyl.pType:
|
||||
of RawData:
|
||||
pyl.rawBlob
|
||||
of AccountData:
|
||||
let key = block:
|
||||
if pyl.stoID.isValid:
|
||||
@ -135,7 +133,10 @@ when false:
|
||||
# Private test functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc testCreatePortalProof(node: JsonNode, testStatusIMPL: var TestStatus) =
|
||||
proc testCreatePortalProof(node: JsonNode, testStatusIMPL: var TestStatus) {.deprecated: "need to be rewritten to use non-generic data".} =
|
||||
block: # TODO remove after rewrite
|
||||
skip
|
||||
return
|
||||
const info = "testCreateProofTwig"
|
||||
|
||||
# Create partial database
|
||||
|
@ -247,106 +247,106 @@ proc testTxMergeAndDeleteOneByOne*(
|
||||
noisy: bool;
|
||||
list: openArray[ProofTrieData];
|
||||
rdbPath: string; # Rocks DB storage directory
|
||||
): bool =
|
||||
var
|
||||
prng = PrngDesc.init 42
|
||||
db = AristoDbRef(nil)
|
||||
fwdRevVfyToggle = true
|
||||
defer:
|
||||
if not db.isNil:
|
||||
db.finish(eradicate=true)
|
||||
): bool {.deprecated: "rewrite to use non-generic data".} =
|
||||
# var
|
||||
# prng = PrngDesc.init 42
|
||||
# db = AristoDbRef(nil)
|
||||
# fwdRevVfyToggle = true
|
||||
# defer:
|
||||
# if not db.isNil:
|
||||
# db.finish(eradicate=true)
|
||||
|
||||
for n,w in list:
|
||||
# Start with brand new persistent database.
|
||||
db = block:
|
||||
if 0 < rdbPath.len:
|
||||
let (dbOpts, cfOpts) = DbOptions.init().toRocksDb()
|
||||
let rc = AristoDbRef.init(RdbBackendRef, rdbPath, DbOptions.init(), dbOpts, cfOpts, [])
|
||||
xCheckRc rc.error == 0
|
||||
rc.value()[0]
|
||||
else:
|
||||
AristoDbRef.init(MemBackendRef)
|
||||
# for n,w in list:
|
||||
# # Start with brand new persistent database.
|
||||
# db = block:
|
||||
# if 0 < rdbPath.len:
|
||||
# let (dbOpts, cfOpts) = DbOptions.init().toRocksDb()
|
||||
# let rc = AristoDbRef.init(RdbBackendRef, rdbPath, DbOptions.init(), dbOpts, cfOpts, [])
|
||||
# xCheckRc rc.error == 0
|
||||
# rc.value()[0]
|
||||
# else:
|
||||
# AristoDbRef.init(MemBackendRef)
|
||||
|
||||
# Start transaction (double frame for testing)
|
||||
xCheck db.txTop.isErr
|
||||
var tx = db.txBegin().value.to(AristoDbRef).txBegin().value
|
||||
xCheck tx.isTop()
|
||||
xCheck tx.level == 2
|
||||
# # Start transaction (double frame for testing)
|
||||
# xCheck db.txTop.isErr
|
||||
# var tx = db.txBegin().value.to(AristoDbRef).txBegin().value
|
||||
# xCheck tx.isTop()
|
||||
# xCheck tx.level == 2
|
||||
|
||||
# Reset database so that the next round has a clean setup
|
||||
defer: db.innerCleanUp
|
||||
# # Reset database so that the next round has a clean setup
|
||||
# defer: db.innerCleanUp
|
||||
|
||||
# Merge leaf data into main trie
|
||||
let kvpLeafs = block:
|
||||
var lst = w.kvpLst.mapRootVid testRootVid
|
||||
# The list might be reduced for isolation of particular properties,
|
||||
# e.g. lst.setLen(min(5,lst.len))
|
||||
lst
|
||||
for i,leaf in kvpLeafs:
|
||||
let rc = db.mergeGenericData leaf
|
||||
xCheckRc rc.error == 0
|
||||
# # Merge leaf data into main trie
|
||||
# let kvpLeafs = block:
|
||||
# var lst = w.kvpLst.mapRootVid testRootVid
|
||||
# # The list might be reduced for isolation of particular properties,
|
||||
# # e.g. lst.setLen(min(5,lst.len))
|
||||
# lst
|
||||
# for i,leaf in kvpLeafs:
|
||||
# let rc = db.mergeGenericData leaf
|
||||
# xCheckRc rc.error == 0
|
||||
|
||||
# List of all leaf entries that should be on the database
|
||||
var leafsLeft = kvpLeafs.mapIt(it.leafTie).toHashSet
|
||||
# # List of all leaf entries that should be on the database
|
||||
# var leafsLeft = kvpLeafs.mapIt(it.leafTie).toHashSet
|
||||
|
||||
# Provide a (reproducible) peudo-random copy of the leafs list
|
||||
let leafVidPairs = block:
|
||||
let rc = db.randomisedLeafs(leafsLeft, prng)
|
||||
xCheckRc rc.error == (0,0)
|
||||
rc.value
|
||||
# # Provide a (reproducible) peudo-random copy of the leafs list
|
||||
# let leafVidPairs = block:
|
||||
# let rc = db.randomisedLeafs(leafsLeft, prng)
|
||||
# xCheckRc rc.error == (0,0)
|
||||
# rc.value
|
||||
|
||||
# Trigger subsequent saving tasks in loop below
|
||||
let (saveMod, saveRest, relax) = block:
|
||||
if leafVidPairs.len < 17: (7, 3, false)
|
||||
elif leafVidPairs.len < 31: (11, 7, false)
|
||||
else: (leafVidPairs.len div 5, 11, true)
|
||||
# # Trigger subsequent saving tasks in loop below
|
||||
# let (saveMod, saveRest, relax) = block:
|
||||
# if leafVidPairs.len < 17: (7, 3, false)
|
||||
# elif leafVidPairs.len < 31: (11, 7, false)
|
||||
# else: (leafVidPairs.len div 5, 11, true)
|
||||
|
||||
# === Loop over leafs ===
|
||||
for u,lvp in leafVidPairs:
|
||||
let
|
||||
runID = n + list.len * u
|
||||
tailWalkVerify = 7 # + 999
|
||||
doSaveBeOk = ((u mod saveMod) == saveRest)
|
||||
(leaf, lid) = lvp
|
||||
# # === Loop over leafs ===
|
||||
# for u,lvp in leafVidPairs:
|
||||
# let
|
||||
# runID = n + list.len * u
|
||||
# tailWalkVerify = 7 # + 999
|
||||
# doSaveBeOk = ((u mod saveMod) == saveRest)
|
||||
# (leaf, lid) = lvp
|
||||
|
||||
if doSaveBeOk:
|
||||
let saveBeOk = tx.saveToBackend(relax=relax, noisy=noisy, runID)
|
||||
xCheck saveBeOk:
|
||||
noisy.say "***", "del1by1(2)",
|
||||
" u=", u,
|
||||
" n=", n, "/", list.len,
|
||||
"\n db\n ", db.pp(backendOk=true),
|
||||
""
|
||||
# if doSaveBeOk:
|
||||
# let saveBeOk = tx.saveToBackend(relax=relax, noisy=noisy, runID)
|
||||
# xCheck saveBeOk:
|
||||
# noisy.say "***", "del1by1(2)",
|
||||
# " u=", u,
|
||||
# " n=", n, "/", list.len,
|
||||
# "\n db\n ", db.pp(backendOk=true),
|
||||
# ""
|
||||
|
||||
# Delete leaf
|
||||
block:
|
||||
let rc = db.deleteGenericData(leaf.root, @(leaf.path))
|
||||
xCheckRc rc.error == 0
|
||||
# # Delete leaf
|
||||
# block:
|
||||
# let rc = db.deleteGenericData(leaf.root, @(leaf.path))
|
||||
# xCheckRc rc.error == 0
|
||||
|
||||
# Update list of remaininf leafs
|
||||
leafsLeft.excl leaf
|
||||
# # Update list of remaininf leafs
|
||||
# leafsLeft.excl leaf
|
||||
|
||||
let deletedVtx = tx.db.getVtx lid
|
||||
xCheck deletedVtx.isValid == false:
|
||||
noisy.say "***", "del1by1(8)"
|
||||
# let deletedVtx = tx.db.getVtx lid
|
||||
# xCheck deletedVtx.isValid == false:
|
||||
# noisy.say "***", "del1by1(8)"
|
||||
|
||||
# Walking the database is too slow for large tables. So the hope is that
|
||||
# potential errors will not go away and rather pop up later, as well.
|
||||
if leafsLeft.len <= tailWalkVerify:
|
||||
if u < leafVidPairs.len-1:
|
||||
if fwdRevVfyToggle:
|
||||
fwdRevVfyToggle = false
|
||||
if not db.fwdWalkVerify(leaf.root, leafsLeft, noisy, runID):
|
||||
return
|
||||
else:
|
||||
fwdRevVfyToggle = true
|
||||
if not db.revWalkVerify(leaf.root, leafsLeft, noisy, runID):
|
||||
return
|
||||
# # Walking the database is too slow for large tables. So the hope is that
|
||||
# # potential errors will not go away and rather pop up later, as well.
|
||||
# if leafsLeft.len <= tailWalkVerify:
|
||||
# if u < leafVidPairs.len-1:
|
||||
# if fwdRevVfyToggle:
|
||||
# fwdRevVfyToggle = false
|
||||
# if not db.fwdWalkVerify(leaf.root, leafsLeft, noisy, runID):
|
||||
# return
|
||||
# else:
|
||||
# fwdRevVfyToggle = true
|
||||
# if not db.revWalkVerify(leaf.root, leafsLeft, noisy, runID):
|
||||
# return
|
||||
|
||||
when true and false:
|
||||
noisy.say "***", "del1by1(9)",
|
||||
" n=", n, "/", list.len,
|
||||
" nLeafs=", kvpLeafs.len
|
||||
# when true and false:
|
||||
# noisy.say "***", "del1by1(9)",
|
||||
# " n=", n, "/", list.len,
|
||||
# " nLeafs=", kvpLeafs.len
|
||||
|
||||
true
|
||||
|
||||
@ -355,79 +355,79 @@ proc testTxMergeAndDeleteSubTree*(
|
||||
noisy: bool;
|
||||
list: openArray[ProofTrieData];
|
||||
rdbPath: string; # Rocks DB storage directory
|
||||
): bool =
|
||||
var
|
||||
prng = PrngDesc.init 42
|
||||
db = AristoDbRef(nil)
|
||||
defer:
|
||||
if not db.isNil:
|
||||
db.finish(eradicate=true)
|
||||
): bool {.deprecated: "rewrite to use non-generic data".} =
|
||||
# var
|
||||
# prng = PrngDesc.init 42
|
||||
# db = AristoDbRef(nil)
|
||||
# defer:
|
||||
# if not db.isNil:
|
||||
# db.finish(eradicate=true)
|
||||
|
||||
for n,w in list:
|
||||
# Start with brand new persistent database.
|
||||
db = block:
|
||||
if 0 < rdbPath.len:
|
||||
let (dbOpts, cfOpts) = DbOptions.init().toRocksDb()
|
||||
let rc = AristoDbRef.init(RdbBackendRef, rdbPath, DbOptions.init(), dbOpts, cfOpts, [])
|
||||
xCheckRc rc.error == 0
|
||||
rc.value()[0]
|
||||
else:
|
||||
AristoDbRef.init(MemBackendRef)
|
||||
# for n,w in list:
|
||||
# # Start with brand new persistent database.
|
||||
# db = block:
|
||||
# if 0 < rdbPath.len:
|
||||
# let (dbOpts, cfOpts) = DbOptions.init().toRocksDb()
|
||||
# let rc = AristoDbRef.init(RdbBackendRef, rdbPath, DbOptions.init(), dbOpts, cfOpts, [])
|
||||
# xCheckRc rc.error == 0
|
||||
# rc.value()[0]
|
||||
# else:
|
||||
# AristoDbRef.init(MemBackendRef)
|
||||
|
||||
# Start transaction (double frame for testing)
|
||||
xCheck db.txTop.isErr
|
||||
var tx = db.txBegin().value.to(AristoDbRef).txBegin().value
|
||||
xCheck tx.isTop()
|
||||
xCheck tx.level == 2
|
||||
# # Start transaction (double frame for testing)
|
||||
# xCheck db.txTop.isErr
|
||||
# var tx = db.txBegin().value.to(AristoDbRef).txBegin().value
|
||||
# xCheck tx.isTop()
|
||||
# xCheck tx.level == 2
|
||||
|
||||
# Reset database so that the next round has a clean setup
|
||||
defer: db.innerCleanUp
|
||||
# # Reset database so that the next round has a clean setup
|
||||
# defer: db.innerCleanUp
|
||||
|
||||
# Merge leaf data into main trie (w/vertex ID 2)
|
||||
let kvpLeafs = block:
|
||||
var lst = w.kvpLst.mapRootVid testRootVid
|
||||
# The list might be reduced for isolation of particular properties,
|
||||
# e.g. lst.setLen(min(5,lst.len))
|
||||
lst
|
||||
for i,leaf in kvpLeafs:
|
||||
let rc = db.mergeGenericData leaf
|
||||
xCheckRc rc.error == 0
|
||||
# # Merge leaf data into main trie (w/vertex ID 2)
|
||||
# let kvpLeafs = block:
|
||||
# var lst = w.kvpLst.mapRootVid testRootVid
|
||||
# # The list might be reduced for isolation of particular properties,
|
||||
# # e.g. lst.setLen(min(5,lst.len))
|
||||
# lst
|
||||
# for i,leaf in kvpLeafs:
|
||||
# let rc = db.mergeGenericData leaf
|
||||
# xCheckRc rc.error == 0
|
||||
|
||||
# List of all leaf entries that should be on the database
|
||||
var leafsLeft = kvpLeafs.mapIt(it.leafTie).toHashSet
|
||||
# # List of all leaf entries that should be on the database
|
||||
# var leafsLeft = kvpLeafs.mapIt(it.leafTie).toHashSet
|
||||
|
||||
# Provide a (reproducible) peudo-random copy of the leafs list
|
||||
let leafVidPairs = block:
|
||||
let rc = db.randomisedLeafs(leafsLeft, prng)
|
||||
xCheckRc rc.error == (0,0)
|
||||
rc.value
|
||||
discard leafVidPairs
|
||||
# # Provide a (reproducible) peudo-random copy of the leafs list
|
||||
# let leafVidPairs = block:
|
||||
# let rc = db.randomisedLeafs(leafsLeft, prng)
|
||||
# xCheckRc rc.error == (0,0)
|
||||
# rc.value
|
||||
# discard leafVidPairs
|
||||
|
||||
# === delete sub-tree ===
|
||||
block:
|
||||
let saveBeOk = tx.saveToBackend(relax=false, noisy=noisy, 1+list.len*n)
|
||||
xCheck saveBeOk:
|
||||
noisy.say "***", "del(1)",
|
||||
" n=", n, "/", list.len,
|
||||
"\n db\n ", db.pp(backendOk=true),
|
||||
""
|
||||
# Delete sub-tree
|
||||
block:
|
||||
let rc = db.deleteGenericTree testRootVid
|
||||
xCheckRc rc.error == 0:
|
||||
noisy.say "***", "del(2)",
|
||||
" n=", n, "/", list.len,
|
||||
"\n db\n ", db.pp(backendOk=true),
|
||||
""
|
||||
block:
|
||||
let saveBeOk = tx.saveToBackend(relax=false, noisy=noisy, 2+list.len*n)
|
||||
xCheck saveBeOk:
|
||||
noisy.say "***", "del(3)",
|
||||
" n=", n, "/", list.len,
|
||||
"\n db\n ", db.pp(backendOk=true),
|
||||
""
|
||||
when true and false:
|
||||
noisy.say "***", "del(9) n=", n, "/", list.len, " nLeafs=", kvpLeafs.len
|
||||
# # === delete sub-tree ===
|
||||
# block:
|
||||
# let saveBeOk = tx.saveToBackend(relax=false, noisy=noisy, 1+list.len*n)
|
||||
# xCheck saveBeOk:
|
||||
# noisy.say "***", "del(1)",
|
||||
# " n=", n, "/", list.len,
|
||||
# "\n db\n ", db.pp(backendOk=true),
|
||||
# ""
|
||||
# # Delete sub-tree
|
||||
# block:
|
||||
# let rc = db.deleteGenericTree testRootVid
|
||||
# xCheckRc rc.error == 0:
|
||||
# noisy.say "***", "del(2)",
|
||||
# " n=", n, "/", list.len,
|
||||
# "\n db\n ", db.pp(backendOk=true),
|
||||
# ""
|
||||
# block:
|
||||
# let saveBeOk = tx.saveToBackend(relax=false, noisy=noisy, 2+list.len*n)
|
||||
# xCheck saveBeOk:
|
||||
# noisy.say "***", "del(3)",
|
||||
# " n=", n, "/", list.len,
|
||||
# "\n db\n ", db.pp(backendOk=true),
|
||||
# ""
|
||||
# when true and false:
|
||||
# noisy.say "***", "del(9) n=", n, "/", list.len, " nLeafs=", kvpLeafs.len
|
||||
|
||||
true
|
||||
|
||||
|
@ -67,45 +67,46 @@ proc preLoadAristoDb(cdb: CoreDbRef; jKvp: JsonNode; num: BlockNumber) =
|
||||
ps.partPut(proof, AutomaticPayload).isOkOr:
|
||||
raiseAssert info & ": partPut => " & $error
|
||||
|
||||
# Handle transaction sub-tree
|
||||
if txRoot.isValid:
|
||||
var txs: seq[Transaction]
|
||||
for (key,pyl) in adb.rightPairs LeafTie(root: ps.partGetSubTree txRoot):
|
||||
let
|
||||
inx = key.path.to(UInt256).truncate(uint)
|
||||
tx = rlp.decode(pyl.rawBlob, Transaction)
|
||||
#
|
||||
# FIXME: Is this might be a bug in the test data?
|
||||
#
|
||||
# The single item test key is always `128`. For non-single test
|
||||
# lists, the keys are `1`,`2`, ..,`N`, `128` (some single digit
|
||||
# number `N`.)
|
||||
#
|
||||
# Unless the `128` item value is put at the start of the argument
|
||||
# list `txs[]` for `persistTransactions()`, the `tracer` module
|
||||
# will throw an exception at
|
||||
# `doAssert(transactions.calcTxRoot == header.txRoot)` in the
|
||||
# function `traceTransactionImpl()`.
|
||||
#
|
||||
if (inx and 0x80) != 0:
|
||||
txs = @[tx] & txs
|
||||
else:
|
||||
txs.add tx
|
||||
cdb.persistTransactions(num, txRoot, txs)
|
||||
# TODO code needs updating after removal of generic payloads
|
||||
# # Handle transaction sub-tree
|
||||
# if txRoot.isValid:
|
||||
# var txs: seq[Transaction]
|
||||
# for (key,pyl) in adb.rightPairs LeafTie(root: ps.partGetSubTree txRoot):
|
||||
# let
|
||||
# inx = key.path.to(UInt256).truncate(uint)
|
||||
# tx = rlp.decode(pyl.rawBlob, Transaction)
|
||||
# #
|
||||
# # FIXME: Is this might be a bug in the test data?
|
||||
# #
|
||||
# # The single item test key is always `128`. For non-single test
|
||||
# # lists, the keys are `1`,`2`, ..,`N`, `128` (some single digit
|
||||
# # number `N`.)
|
||||
# #
|
||||
# # Unless the `128` item value is put at the start of the argument
|
||||
# # list `txs[]` for `persistTransactions()`, the `tracer` module
|
||||
# # will throw an exception at
|
||||
# # `doAssert(transactions.calcTxRoot == header.txRoot)` in the
|
||||
# # function `traceTransactionImpl()`.
|
||||
# #
|
||||
# if (inx and 0x80) != 0:
|
||||
# txs = @[tx] & txs
|
||||
# else:
|
||||
# txs.add tx
|
||||
# cdb.persistTransactions(num, txRoot, txs)
|
||||
|
||||
# Handle receipts sub-tree
|
||||
if rcptRoot.isValid:
|
||||
var rcpts: seq[Receipt]
|
||||
for (key,pyl) in adb.rightPairs LeafTie(root: ps.partGetSubTree rcptRoot):
|
||||
let
|
||||
inx = key.path.to(UInt256).truncate(uint)
|
||||
rcpt = rlp.decode(pyl.rawBlob, Receipt)
|
||||
# FIXME: See comment at `txRoot` section.
|
||||
if (inx and 0x80) != 0:
|
||||
rcpts = @[rcpt] & rcpts
|
||||
else:
|
||||
rcpts.add rcpt
|
||||
cdb.persistReceipts(rcptRoot, rcpts)
|
||||
# # Handle receipts sub-tree
|
||||
# if rcptRoot.isValid:
|
||||
# var rcpts: seq[Receipt]
|
||||
# for (key,pyl) in adb.rightPairs LeafTie(root: ps.partGetSubTree rcptRoot):
|
||||
# let
|
||||
# inx = key.path.to(UInt256).truncate(uint)
|
||||
# rcpt = rlp.decode(pyl.rawBlob, Receipt)
|
||||
# # FIXME: See comment at `txRoot` section.
|
||||
# if (inx and 0x80) != 0:
|
||||
# rcpts = @[rcpt] & rcpts
|
||||
# else:
|
||||
# rcpts.add rcpt
|
||||
# cdb.persistReceipts(rcptRoot, rcpts)
|
||||
|
||||
# Save keys to database
|
||||
for (rvid,key) in ps.vkPairs:
|
||||
@ -121,7 +122,9 @@ proc preLoadAristoDb(cdb: CoreDbRef; jKvp: JsonNode; num: BlockNumber) =
|
||||
#if true: quit()
|
||||
|
||||
# use tracerTestGen.nim to generate additional test data
|
||||
proc testFixtureImpl(node: JsonNode, testStatusIMPL: var TestStatus, memoryDB: CoreDbRef) =
|
||||
proc testFixtureImpl(node: JsonNode, testStatusIMPL: var TestStatus, memoryDB: CoreDbRef) {.deprecated: "needs fixing for non-generic payloads".} =
|
||||
block:
|
||||
return
|
||||
setErrorLevel()
|
||||
|
||||
var
|
||||
|
Loading…
x
Reference in New Issue
Block a user