Aristo db traversal helpers (#1638)

* Misc fixes

detail:
* Fix de-serialisation for account leafs
* Update node recovery from unit tests

* Remove `LegacyAccount` from `PayloadRef` object

why:
  Legacy accounts use a hash key as storage root which is detrimental
  to the working of the Aristo database which uses a vertex ID.

* Dissolve `hashify_helper` into `aristo_utils` and `aristo_transcode`

why:
  Functions are of general interest so they should live in first level
  code files.

* Added left/right iterators over leaf nodes

* Some helper/wrapper functions that might be useful
This commit is contained in:
Jordan Hrycaj 2023-07-13 00:03:14 +01:00 committed by GitHub
parent f6674acda2
commit 56d5c382d7
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
19 changed files with 463 additions and 190 deletions

View File

@ -16,19 +16,24 @@
import
eth/common,
aristo/aristo_desc/[aristo_types_identifiers, aristo_types_structural],
aristo/[aristo_constants, aristo_desc, aristo_init, aristo_transaction]
aristo/[aristo_constants, aristo_desc, aristo_init, aristo_nearby,
aristo_transaction, aristo_transcode, aristo_utils]
export
aristo_constants,
aristo_transaction,
aristo_types_identifiers,
aristo_types_structural,
aristo_nearby,
aristo_utils,
AristoBackendType,
AristoDbRef,
AristoError,
append,
init,
isValid,
finish
finish,
read
# End

View File

@ -18,8 +18,7 @@ import
eth/common,
stew/[interval_set, results],
./aristo_init/[aristo_memory, aristo_rocksdb],
"."/[aristo_desc, aristo_get, aristo_init, aristo_vid],
./aristo_hashify/hashify_helper,
"."/[aristo_desc, aristo_get, aristo_init, aristo_vid, aristo_utils],
./aristo_check/[check_be, check_cache]
# ------------------------------------------------------------------------------

View File

@ -14,9 +14,8 @@ import
std/[algorithm, sequtils, sets, tables],
eth/common,
stew/interval_set,
../aristo_hashify/hashify_helper,
../aristo_init/[aristo_memory, aristo_rocksdb],
".."/[aristo_desc, aristo_get, aristo_vid]
".."/[aristo_desc, aristo_get, aristo_vid, aristo_transcode, aristo_utils]
const
Vid2 = @[VertexID(2)].toHashSet
@ -108,7 +107,7 @@ proc checkBE*[T](
if rx.isErr:
return err((vid,CheckBeKeyCantCompile))
if not relax:
let expected = rx.value.toHashKey
let expected = rx.value.to(HashKey)
if expected != key:
return err((vid,CheckBeKeyMismatch))
discard vids.reduce Interval[VertexID,uint64].new(vid,vid)
@ -160,7 +159,7 @@ proc checkBE*[T](
let rc = vtx.toNode db # compile cache first
if rc.isErr:
return err((vid,CheckBeCacheKeyCantCompile))
let expected = rc.value.toHashKey
let expected = rc.value.to(HashKey)
if expected != lbl.key:
return err((vid,CheckBeCacheKeyMismatch))

View File

@ -14,8 +14,7 @@ import
std/[sequtils, sets, tables],
eth/common,
stew/results,
../aristo_hashify/hashify_helper,
".."/[aristo_desc, aristo_get]
".."/[aristo_desc, aristo_get, aristo_transcode, aristo_utils]
# ------------------------------------------------------------------------------
# Public functions
@ -33,7 +32,7 @@ proc checkCacheStrict*(
let lbl = db.top.kMap.getOrVoid vid
if not lbl.isValid:
return err((vid,CheckStkVtxKeyMissing))
if lbl.key != rc.value.toHashKey:
if lbl.key != rc.value.to(HashKey):
return err((vid,CheckStkVtxKeyMismatch))
let revVid = db.top.pAmk.getOrVoid lbl
@ -63,7 +62,7 @@ proc checkCacheRelaxed*(
let lbl = db.top.kMap.getOrVoid vid
if not lbl.isValid:
return err((vid,CheckRlxVtxKeyMissing))
if lbl.key != rc.value.toHashKey:
if lbl.key != rc.value.to(HashKey):
return err((vid,CheckRlxVtxKeyMismatch))
let revVid = db.top.pAmk.getOrVoid lbl
@ -78,7 +77,7 @@ proc checkCacheRelaxed*(
if vtx.isValid:
let rc = vtx.toNode db
if rc.isOk:
if lbl.key != rc.value.toHashKey:
if lbl.key != rc.value.to(HashKey):
return err((vid,CheckRlxVtxKeyMismatch))
let revVid = db.top.pAmk.getOrVoid lbl

View File

@ -28,8 +28,9 @@ const
## Equivalent of `nil` for `Account` object code hash
VOID_HASH_KEY* = EMPTY_ROOT_HASH.to(HashKey)
## Equivalent of `nil` for Merkle hash key
## Void equivalent for Merkle hash value
VOID_HASH_LABEL* = HashLabel(root: VertexID(0), key: VOID_HASH_KEY)
## Void equivalent for Merkle hash value
# End

View File

@ -14,7 +14,7 @@ import
std/[algorithm, sequtils, sets, strutils, tables],
eth/[common, trie/nibbles],
stew/byteutils,
"."/[aristo_constants, aristo_desc, aristo_hike, aristo_init, aristo_vid],
"."/[aristo_constants, aristo_desc, aristo_hike, aristo_init],
./aristo_init/[aristo_memory, aristo_rocksdb]
# ------------------------------------------------------------------------------
@ -162,12 +162,6 @@ proc ppPayload(p: PayloadRef, db: AristoDbRef): string =
result &= $p.account.balance & ","
result &= p.account.storageID.ppVid & ","
result &= p.account.codeHash.to(HashKey).ppCodeKey() & ")"
of LegacyAccount:
result = "("
result &= $p.legaAcc.nonce & ","
result &= $p.legaAcc.balance & ","
result &= p.legaAcc.storageRoot.to(HashKey).ppRootKey() & ","
result &= p.legaAcc.codeHash.to(HashKey).ppCodeKey() & ")"
proc ppVtx(nd: VertexRef, db: AristoDbRef, vid: VertexID): string =
if not nd.isValid:
@ -395,19 +389,6 @@ proc ppCache(
# Public functions
# ------------------------------------------------------------------------------
proc lblToVtxID*(db: AristoDbRef, lbl: HashLabel): VertexID =
## Associate a vertex ID with the argument `key` for pretty printing.
if lbl.isValid:
let vid = db.xMap.getOrVoid lbl
if vid.isValid:
result = vid
else:
result = db.vidFetch()
db.xMap[lbl] = result
proc hashToVtxID*(db: AristoDbRef; root: VertexID; hash: Hash256): VertexID =
db.lblToVtxID HashLabel(root: root, key: hash.to(HashKey))
proc pp*(key: HashKey): string =
key.ppKey

View File

@ -202,4 +202,11 @@ type
TxCacheKeyFetchFail
TxBeKeyFetchFail
# Miscellaneous handy helpers
PayloadTypeUnsupported
AccountRlpDecodingError
AccountStorageKeyMissing
AccountVtxUnsupported
AccountNodeUnsupported
# End

View File

func `<`*(a, b: HashID): bool =
  ## Strict ordering on `HashID` values via the underlying 256 bit integers.
  a.u256 < b.u256

func cmp*(x, y: HashID): int =
  ## Three-way comparison on `HashID` values, delegating to the
  ## `UInt256` comparison.
  cmp(UInt256(x), UInt256(y))
# ------------------------------------------------------------------------------
# Public helpers: `LeafTie`
# ------------------------------------------------------------------------------
func high*(_: type LeafTie; root = VertexID(1)): LeafTie =
  ## Highest possible `LeafTie` object for given root vertex.
  result = LeafTie(path: high(HashID), root: root)
func low*(_: type LeafTie; root = VertexID(1)): LeafTie =
  ## Lowest possible `LeafTie` object for given root vertex.
  result = LeafTie(path: low(HashID), root: root)
func `+`*(lty: LeafTie, n: int): LeafTie =
  ## Return a `LeafTie` object with incremented path field. This function
  ## will not check for a path field overflow, nor will it verify that
  ## the argument `n` is non-negative.
  # NOTE(review): a negative `n` converted via `n.u256` would wrap/fail --
  # callers are expected to pass non-negative increments only.
  LeafTie(root: lty.root, path: HashID(lty.path.u256 + n.u256))
func `-`*(lty: LeafTie, n: int): LeafTie =
  ## Return a `LeafTie` object with decremented path field. This function
  ## will not check for a path field underflow, nor will it verify that
  ## the argument `n` is non-negative.
  # NOTE(review): a negative `n` converted via `n.u256` would wrap/fail --
  # callers are expected to pass non-negative decrements only.
  LeafTie(root: lty.root, path: HashID(lty.path.u256 - n.u256))
# ------------------------------------------------------------------------------
# Public helpers: Conversions between `HashID`, `HashKey`, `Hash256`
# ------------------------------------------------------------------------------

View File

@ -49,7 +49,6 @@ type
RawData ## Generic data
RlpData ## Marked RLP encoded
AccountData ## `Aristo account` with vertex IDs links
LegacyAccount ## Legacy `Account` with hash references
PayloadRef* = ref object
case pType*: PayloadType
@ -59,8 +58,6 @@ type
rlpBlob*: Blob ## Opaque data marked RLP encoded
of AccountData:
account*: AristoAccount
of LegacyAccount:
legaAcc*: Account ## Expanded accounting data
VertexRef* = ref object of RootRef
## Vertex for building a hexary Patricia or Merkle Patricia Trie
@ -103,9 +100,6 @@ proc `==`*(a, b: PayloadRef): bool =
of AccountData:
if a.account != b.account:
return false
of LegacyAccount:
if a.legaAcc != b.legaAcc:
return false
true
proc `==`*(a, b: VertexRef): bool =
@ -165,10 +159,6 @@ proc dup*(pld: PayloadRef): PayloadRef =
PayloadRef(
pType: AccountData,
account: pld.account)
of LegacyAccount:
PayloadRef(
pType: LegacyAccount,
legaAcc: pld.legaAcc)
proc dup*(vtx: VertexRef): VertexRef =
## Duplicate vertex.
@ -192,6 +182,31 @@ proc dup*(vtx: VertexRef): VertexRef =
vType: Branch,
bVid: vtx.bVid)
proc dup*(node: NodeRef): NodeRef =
  ## Duplicate node.
  # Not using `deepCopy()` here (some `gc` needs `--deepcopy:on`.)
  if node.isNil:
    return NodeRef(nil)
  case node.vType:
  of Leaf:
    result = NodeRef(
      vType: Leaf,
      lPfx:  node.lPfx,
      lData: node.lData.dup,
      key:   node.key)
  of Extension:
    result = NodeRef(
      vType: Extension,
      ePfx:  node.ePfx,
      eVid:  node.eVid,
      key:   node.key)
  of Branch:
    result = NodeRef(
      vType: Branch,
      bVid:  node.bVid,
      key:   node.key)
proc to*(node: NodeRef; T: type VertexRef): T =
  ## Extract a copy of the `VertexRef` part from a `NodeRef`
  ## (the node's Merkle key fields are dropped by the conversion.)
  dup(VertexRef(node))

View File

@ -46,8 +46,8 @@ import
chronicles,
eth/common,
stew/[interval_set, results],
"."/[aristo_desc, aristo_get, aristo_hike, aristo_vid],
./aristo_hashify/hashify_helper
"."/[aristo_desc, aristo_get, aristo_hike, aristo_transcode, aristo_utils,
aristo_vid]
type
BackVidValRef = ref object
@ -149,7 +149,7 @@ proc leafToRootHasher(
# Check against existing key, or store new key
let
key = rc.value.toHashKey
key = rc.value.to(HashKey)
rx = db.updateHashKey(hike.root, wp.vid, key, bg)
if rx.isErr:
return err((wp.vid,rx.error))
@ -179,7 +179,7 @@ proc deletedLeafHasher(
let rc = wp.vtx.toNode(db, stopEarly=false)
if rc.isOk:
let
expected = rc.value.toHashKey
expected = rc.value.to(HashKey)
key = db.getKey wp.vid
if key.isValid:
if key != expected:
@ -228,8 +228,8 @@ proc hashify*(
db: AristoDbRef; # Database, top layer
): Result[HashSet[VertexID],(VertexID,AristoError)] =
## Add keys to the `Patricia Trie` so that it becomes a `Merkle Patricia
## Tree`. If successful, the function returns the key (aka Merkle hash) of
## the root vertex.
## Tree`. If successful, the function returns the keys (aka Merkle hash) of
## the root vertices.
var
roots: HashSet[VertexID]
completed: HashSet[VertexID]
@ -307,7 +307,7 @@ proc hashify*(
else:
# Update Merkle hash
let
key = rc.value.toHashKey
key = rc.value.to(HashKey)
rx = db.updateHashKey(val.root, vid, key, val.onBe)
if rx.isErr:
return err((vid,rx.error))

View File

@ -1,78 +0,0 @@
# nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.
{.push raises: [].}
import
#std/[tables],
eth/common,
stew/results,
".."/[aristo_constants, aristo_desc, aristo_get, aristo_transcode]
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc toNode*(
vtx: VertexRef; # Vertex to convert
db: AristoDbRef; # Database, top layer
stopEarly = true; # Full list of missing links if `false`
): Result[NodeRef,seq[VertexID]] =
## Convert argument vertex to node
case vtx.vType:
of Leaf:
let node = NodeRef(vType: Leaf, lPfx: vtx.lPfx, lData: vtx.lData)
# Need to resolve storage root for account leaf
if vtx.lData.pType == AccountData:
let vid = vtx.lData.account.storageID
if vid.isValid:
let key = db.getKey vid
if not key.isValid:
return err(@[vid])
node.key[0] = key
return ok node
of Branch:
let node = NodeRef(vType: Branch, bVid: vtx.bVid)
var missing: seq[VertexID]
for n in 0 .. 15:
let vid = vtx.bVid[n]
if vid.isValid:
let key = db.getKey vid
if key.isValid:
node.key[n] = key
else:
missing.add vid
if stopEarly:
break
else:
node.key[n] = VOID_HASH_KEY
if 0 < missing.len:
return err(missing)
return ok node
of Extension:
let
vid = vtx.eVid
key = db.getKey vid
if key.isValid:
let node = NodeRef(vType: Extension, ePfx: vtx.ePfx, eVid: vid)
node.key[0] = key
return ok node
return err(@[vid])
# This function cannot go into `aristo_desc` as it depends on `aristo_transcode`
# which depends on `aristo_desc`.
proc toHashKey*(node: NodeRef): HashKey =
  ## Convert argument `node` to its Merkle hash key (RLP encode, then digest.)
  let encoded = node.encode
  encoded.digestTo(HashKey)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -612,6 +612,29 @@ proc merge*(
(merged, dups, AristoError(0))
proc merge*(
    db: AristoDbRef;                   # Database, top layer
    path: HashID;                      # Path into database
    rlpData: Blob;                     # RLP encoded payload data
      ): Result[void,AristoError] =
  ## Variant of `merge()` for storing a single item. This function stores the
  ## arguments as a `LeafTiePayload` type item on the main tree with root
  ## vertex ID 1. This is handy when used on a temporary backendless `Aristo`
  ## database. Having merged some leafs and subsequently hashified (see
  ## `hashify()`), the state root is available as Merkle hash key for
  ## vertex ID 1 (see `getKey()`.)
  ##
  let hike = db.merge LeafTiePayload(
    leafTie: LeafTie(
      root:    VertexID(1),
      path:    path),
    payload: PayloadRef(
      pType:   RlpData,
      rlpBlob: rlpData))
  # Bug fix: the error check was inverted (`== AristoError(0)`), which
  # returned `err(0)` on success and `ok()` on failure. Fail only when
  # `merge()` reported a non-zero error.
  if hike.error != AristoError(0):
    return err(hike.error)
  ok()
# ---------------------
proc merge*(

View File

@ -70,6 +70,14 @@ proc branchNibbleMax*(vtx: VertexRef; maxInx: int8): int8 =
return n
-1
# ------------------
proc toTLeafTiePayload(hike: Hike): (LeafTie,PayloadRef) =
  ## Shortcut for iterators. This function will gloriously crash unless the
  ## `hike` argument is complete.
  ##
  ## Returns the `LeafTie` implied by the hike's full path together with the
  ## payload of the terminal leaf vertex.
  # NOTE(review): `pathToTag.value` raises on an incomplete path and
  # `legs[^1]` on an empty leg list -- only pass fully resolved hikes.
  # (Name appears to carry a stray `T` -- presumably `toLeafTiePayload`;
  # kept as-is since the iterators below reference it.)
  (LeafTie(root: hike.root, path: hike.to(NibblesSeq).pathToTag.value),
   hike.legs[^1].wp.vtx.lData)
# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------
@ -418,6 +426,48 @@ proc right*(
return err(rc.error)
ok LeafTie(root: lty.root, path: rc.value)
iterator right*(
    db: AristoDbRef;                   # Database layer
    start = low(LeafTie);              # Before or at first value
      ): (LeafTie,PayloadRef) =
  ## Traverse the sub-trie implied by the argument `start` with increasing
  ## order. Iteration stops silently on the first `right()` error (e.g. when
  ## the trie is exhausted.)
  var
    hike = start.hikeUp db
    rc = hike.right db
  while rc.isOK:
    hike = rc.value
    let (key, pyl) = hike.toTLeafTiePayload
    yield (key, pyl)
    # Stop before incrementing past the maximal path value.
    if high(HashID) <= key.path:
      break
    # Increment `key` by one and update `hike`. In many cases, the current
    # `hike` can be modified and re-used which saves some database lookups.
    block:
      let tail = hike.legs[^1].wp.vtx.lPfx
      if 0 < tail.len:
        # Bump the last nibble of the leaf prefix if it has headroom.
        let topNibble = tail[tail.len - 1]
        if topNibble < 15:
          let newNibble = @[topNibble+1].initNibbleRange.slice(1)
          hike.tail = tail.slice(0, tail.len - 1) & newNibble
          hike.legs.setLen(hike.legs.len - 1)
          break
      if 1 < tail.len:
        # Last nibble overflowed: carry into the second-to-last nibble and
        # reset the last one to 0.
        let nxtNibble = tail[tail.len - 2]
        if nxtNibble < 15:
          let dblNibble = @[((nxtNibble+1) shl 4) + 0].initNibbleRange
          hike.tail = tail.slice(0, tail.len - 2) & dblNibble
          hike.legs.setLen(hike.legs.len - 1)
          break
      # Fall back to default method
      hike = (key + 1).hikeUp db
    rc = hike.right db
    # End while
# ----------------
proc left*(
hike: Hike; # Partially expanded chain of vertices
db: AristoDbRef; # Database layer
@ -438,6 +488,48 @@ proc left*(
return err(rc.error)
ok LeafTie(root: lty.root, path: rc.value)
iterator left*(
    db: AristoDbRef;                   # Database layer
    start = high(LeafTie);             # Before or at first value
      ): (LeafTie,PayloadRef) =
  ## Traverse the sub-trie implied by the argument `start` with decreasing
  ## order. It will stop at any error. In order to reproduce an error, one
  ## can run the function `left()` on the last returned `LeafTie` item with
  ## the `path` field decremented by `1`.
  var
    hike = start.hikeUp db
    rc = hike.left db
  while rc.isOK:
    hike = rc.value
    let (key, pyl) = hike.toTLeafTiePayload
    yield (key, pyl)
    # Stop before decrementing past the minimal path value.
    if key.path <= low(HashID):
      break
    # Decrement `key` by one and update `hike`. In many cases, the current
    # `hike` can be modified and re-used which saves some database lookups.
    block:
      let tail = hike.legs[^1].wp.vtx.lPfx
      if 0 < tail.len:
        # Lower the last nibble of the leaf prefix if it has headroom.
        let topNibble = tail[tail.len - 1]
        if 0 < topNibble:
          let newNibble = @[topNibble - 1].initNibbleRange.slice(1)
          hike.tail = tail.slice(0, tail.len - 1) & newNibble
          hike.legs.setLen(hike.legs.len - 1)
          break
      if 1 < tail.len:
        # Last nibble underflowed: borrow from the second-to-last nibble and
        # reset the last one to 15.
        let nxtNibble = tail[tail.len - 2]
        if 0 < nxtNibble:
          let dblNibble = @[((nxtNibble-1) shl 4) + 15].initNibbleRange
          hike.tail = tail.slice(0, tail.len - 2) & dblNibble
          hike.legs.setLen(hike.legs.len - 1)
          break
      # Fall back to default method
      hike = (key - 1).hikeUp db
    rc = hike.left db
    # End while
# ------------------------------------------------------------------------------
# Public debugging helpers
# ------------------------------------------------------------------------------

View File

@ -363,6 +363,17 @@ proc right*(
return err((VertexID(0),TxTopHandleExpected))
lty.right tdb.db
iterator right*(
    tdb: AristoTxRef;                  # Database layer
    start = low(LeafTie);              # Before or at first value
      ): (LeafTie,PayloadRef) =
  ## Traverse the sub-trie implied by the argument `start` with increasing
  ## order. For details see `aristo_nearby.right()`.
  # Yields nothing (rather than raising) when the transaction handle is
  # not backed by a database and a parent transaction.
  if not tdb.db.isNil and not tdb.parent.isNil:
    for (k,v) in tdb.db.right start:
      yield (k,v)
proc left*(
lty: LeafTie; # Some `Patricia Trie` path
tdb: AristoTxRef; # Database, transaction wrapper
@ -373,6 +384,16 @@ proc left*(
return err((VertexID(0),TxTopHandleExpected))
lty.left tdb.db
iterator left*(
    tdb: AristoTxRef;                  # Database layer
    start = high(LeafTie);             # At or before last value
      ): (LeafTie,PayloadRef) =
  ## Traverse the sub-trie implied by the argument `start` with decreasing
  ## order. For details see `aristo_nearby.left()`. Yields nothing when the
  ## transaction handle has no database or no parent transaction.
  let usable = not tdb.db.isNil and not tdb.parent.isNil
  if usable:
    for item in tdb.db.left start:
      yield item
# ------------------------------------------------------------------------------
# Public helpers, miscellaneous
# ------------------------------------------------------------------------------

View File

@ -46,8 +46,6 @@ proc toPayloadBlob(node: NodeRef): Blob =
result = pyl.rawBlob
of RlpData:
result = pyl.rlpBlob
of LegacyAccount:
result = rlp.encode pyl.legaAcc
of AccountData:
let key = if pyl.account.storageID.isValid: node.key[0] else: VOID_HASH_KEY
result = rlp.encode Account(
@ -155,6 +153,12 @@ proc append*(writer: var RlpWriter; node: NodeRef) =
writer.append node.lPfx.hexPrefixEncode(isleaf = true)
writer.append node.toPayloadBlob
# ---------------------
proc to*(node: NodeRef; T: type HashKey): T =
  ## Convert the argument `node` to the corresponding Merkle hash key
  ## (RLP encode, then digest.)
  let blob = node.encode
  blob.digestTo T
# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------
@ -167,8 +171,6 @@ proc blobify*(pyl: PayloadRef): Blob =
result = pyl.rawBlob & @[0xff.byte]
of RlpData:
result = pyl.rlpBlob & @[0xaa.byte]
of LegacyAccount:
result = pyl.legaAcc.encode & @[0xaa.byte] # also RLP data
of AccountData:
var mask: byte
@ -337,7 +339,7 @@ proc deblobify(data: Blob; pyl: var PayloadRef): AristoError =
case mask and 0xc0:
of 0x00:
discard
pAcc.account.codeHash = VOID_CODE_HASH
of 0x80:
if data.len < start + 33:
return DeblobPayloadTooShortInt256

View File

@ -0,0 +1,157 @@
# nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.
## Aristo DB -- Handy Helpers
## ==========================
##
{.push raises: [].}
import
eth/common,
stint,
results,
"."/[aristo_desc, aristo_get]
# ------------------------------------------------------------------------------
# Public helpers
# ------------------------------------------------------------------------------
proc rootHash*(db: AristoDbRef; root = VertexID(1)): Hash256 =
  ## Shortcut: fetch the Merkle hash key of `root` and widen it to `Hash256`.
  let key = db.getKey root
  key.to(Hash256)
# ------------------------------------------------------------------------------
# Public functions, converters
# ------------------------------------------------------------------------------
proc toAccount*(
    payload: PayloadRef;
    db: AristoDbRef;
      ): Result[Account,AristoError] =
  ## Converts the argument `payload` to an `Account` type. If the implied
  ## account has a storage slots system associated, the database `db` must
  ## contain the Merkle hash key of the root vertex.
  case payload.pType:
  of RlpData:
    # Opaque RLP blob, expected to decode directly into an `Account`.
    try:
      return ok(rlp.decode(payload.rlpBlob, Account))
    except RlpError:
      return err(AccountRlpDecodingError)
  of AccountData:
    var acc = Account(
      nonce:       payload.account.nonce,
      balance:     payload.account.balance,
      codeHash:    payload.account.codehash,
      storageRoot: EMPTY_ROOT_HASH)
    if payload.account.storageID.isValid:
      # Resolve the storage root Merkle key from the database layer.
      let key = db.getKey payload.account.storageID
      if not key.isValid:
        return err(AccountStorageKeyMissing)
      acc.storageRoot = key.to(Hash256)
    return ok(acc)
  else:
    # `RawData` (and any future payload kind) cannot be interpreted here.
    discard
  err PayloadTypeUnsupported
proc toAccount*(
    vtx: VertexRef;
    db: AristoDbRef;
      ): Result[Account,AristoError] =
  ## Variant of `toAccount()` for a `Leaf` vertex; any other vertex kind
  ## (or an invalid vertex) yields `AccountVtxUnsupported`.
  if not vtx.isValid or vtx.vType != Leaf:
    return err(AccountVtxUnsupported)
  vtx.lData.toAccount db
proc toAccount*(
    node: NodeRef;
      ): Result[Account,AristoError] =
  ## Variant of `toAccount()` for a `Leaf` node which must be complete (i.e.
  ## a potential Merkle hash key must have been initialised.)
  if node.isValid and node.vType == Leaf:
    case node.lData.pType:
    of RlpData:
      # Opaque RLP blob, expected to decode directly into an `Account`.
      try:
        return ok(rlp.decode(node.lData.rlpBlob, Account))
      except RlpError:
        return err(AccountRlpDecodingError)
    of AccountData:
      var acc = Account(
        nonce:       node.lData.account.nonce,
        balance:     node.lData.account.balance,
        codeHash:    node.lData.account.codehash,
        storageRoot: EMPTY_ROOT_HASH)
      if node.lData.account.storageID.isValid:
        # Unlike the `PayloadRef` variant, the storage root key comes from
        # the node's own `key[0]` slot rather than a database lookup.
        if not node.key[0].isValid:
          return err(AccountStorageKeyMissing)
        acc.storageRoot = node.key[0].to(Hash256)
      return ok(acc)
    else:
      # `RawData` (and any future payload kind) cannot be interpreted here.
      return err(PayloadTypeUnsupported)
  err AccountNodeUnsupported
# ---------------------
proc toNode*(
    vtx: VertexRef;                    # Vertex to convert
    db: AristoDbRef;                   # Database, top layer
    stopEarly = true;                  # Full list of missing links if `false`
      ): Result[NodeRef,seq[VertexID]] =
  ## Convert argument the vertex `vtx` to a node type. Missing Merkle hash
  ## keys are searched for on the argument database `db`.
  ##
  ## On error, at least the vertex ID of the first missing Merkle hash key is
  ## returned. If the argument `stopEarly` is set `false`, all missing Merkle
  ## hash keys are returned.
  ##
  case vtx.vType:
  of Leaf:
    let node = NodeRef(vType: Leaf, lPfx: vtx.lPfx, lData: vtx.lData)
    # Need to resolve storage root for account leaf
    if vtx.lData.pType == AccountData:
      let vid = vtx.lData.account.storageID
      if vid.isValid:
        let key = db.getKey vid
        if not key.isValid:
          # Storage root key unresolved -- report the storage vertex ID.
          return err(@[vid])
        node.key[0] = key
    return ok node
  of Branch:
    let node = NodeRef(vType: Branch, bVid: vtx.bVid)
    var missing: seq[VertexID]
    for n in 0 .. 15:
      let vid = vtx.bVid[n]
      if vid.isValid:
        let key = db.getKey vid
        if key.isValid:
          node.key[n] = key
        else:
          # Collect missing link; optionally bail out on the first one.
          missing.add vid
          if stopEarly:
            break
      else:
        # Unused branch slot gets the void key placeholder.
        node.key[n] = VOID_HASH_KEY
    if 0 < missing.len:
      return err(missing)
    return ok node
  of Extension:
    let
      vid = vtx.eVid
      key = db.getKey vid
    if key.isValid:
      let node = NodeRef(vType: Extension, ePfx: vtx.ePfx, eVid: vid)
      node.key[0] = key
      return ok node
    return err(@[vid])
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -36,6 +36,15 @@ proc convertPartially(
vType: Leaf,
lPfx: vtx.lPfx,
lData: vtx.lData)
if vtx.lData.pType != AccountData:
return
let vid = vtx.lData.account.storageID
if vid.isValid:
let lbl = db.top.kMap.getOrVoid vid
if lbl.isValid:
nd.key[0] = lbl.key
return
result.add vid
of Extension:
nd = NodeRef(
vType: Extension,
@ -70,7 +79,15 @@ proc convertPartiallyOk(
vType: Leaf,
lPfx: vtx.lPfx,
lData: vtx.lData)
result = true
if vtx.lData.pType != AccountData:
result = true
else:
let vid = vtx.lData.account.storageID
if vid.isValid:
let lbl = db.top.kMap.getOrVoid vid
if lbl.isValid:
nd.key[0] = lbl.key
result = true
of Extension:
nd = NodeRef(
vType: Extension,

View File

@ -113,9 +113,16 @@ proc test_transcodeAccounts*(
case node.vType:
of aristo_desc.Leaf:
let account = node.lData.rawBlob.decode(Account)
node.lData = PayloadRef(pType: LegacyAccount, legaAcc: account)
discard adb.hashToVtxID(VertexID(1), node.lData.legaAcc.storageRoot)
discard adb.hashToVtxID(VertexID(1), node.lData.legaAcc.codeHash)
node.key[0] = account.storageRoot.to(HashKey)
node.lData = PayloadRef(
pType: AccountData,
account: AristoAccount(
nonce: account.nonce,
balance: account.balance,
codeHash: account.codehash,
storageID: adb.vidAttach HashLabel(
root: VertexID(1),
key: account.storageRoot.to(HashKey))))
of aristo_desc.Extension:
# key <-> vtx correspondence
check node.key[0] == node0.key[0]
@ -144,12 +151,6 @@ proc test_transcodeAccounts*(
check node1.error == AristoError(0)
block:
# `deblobify()` will always decode to `BlobData` type payload
if node1.vType == aristo_desc.Leaf:
# Note that deserialisation of the account stops at the RLP encoding
let account = node1.lData.rlpBlob.decode(Account)
node1.lData = PayloadRef(pType: LegacyAccount, legaAcc: account)
if node != node1:
check node == node1
noisy.say "***", "count=", count, " node=", node.pp(adb)

View File

@ -27,6 +27,10 @@ type
KnownHasherFailure* = seq[(string,(int,AristoError))]
## (<sample-name> & "#" <instance>, (<vertex-id>,<error-symbol>))
const
WalkStopRc =
Result[LeafTie,(VertexID,AristoError)].err((VertexID(0),NearbyBeyondRange))
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
@ -207,74 +211,78 @@ proc saveToBackendWithOops(
proc fwdWalkVerify(
tx: AristoTxRef;
root: VertexID;
left: HashSet[LeafTie];
leftOver: HashSet[LeafTie];
noisy: bool;
debugID: int;
): bool =
let
nLeafs = left.len
nLeafs = leftOver.len
var
lfLeft = left
lty = LeafTie(root: root)
leftOver = leftOver
last = LeafTie()
n = 0
while n < nLeafs + 1:
let id = n + (nLeafs + 1) * debugID
let rc = lty.right tx
if rc.isErr:
if rc.error[1] == NearbyBeyondRange and lfLeft.len == 0:
return true
check rc.error == (0,0)
check lfLeft.len == 0
for (key,_) in tx.right low(LeafTie,root):
if key notin leftOver:
noisy.say "*** fwdWalkVerify", " id=", n + (nLeafs + 1) * debugID
check key in leftOver
return
if rc.value notin lfLeft:
check rc.error == (0,0)
return
if rc.value.path < high(HashID):
lty.path = HashID(rc.value.path.u256 + 1)
lfLeft.excl rc.value
leftOver.excl key
last = key
n.inc
check n <= nLeafs
# Verify stop condition
if last.root == VertexID(0):
last = low(LeafTie,root)
elif last != high(LeafTie,root):
last = last + 1
let rc = last.right tx
if rc.isOk or rc.error[1] != NearbyBeyondRange:
check rc == WalkStopRc
return
if n != nLeafs:
check n == nLeafs
return
true
proc revWalkVerify(
tx: AristoTxRef;
root: VertexID;
left: HashSet[LeafTie];
leftOver: HashSet[LeafTie];
noisy: bool;
debugID: int;
): bool =
let
nLeafs = left.len
nLeafs = leftOver.len
var
lfLeft = left
lty = LeafTie(root: root, path: HashID(high(UInt256)))
leftOver = leftOver
last = LeafTie()
n = 0
while n < nLeafs + 1:
let id = n + (nLeafs + 1) * debugID
let rc = lty.left tx
if rc.isErr:
if rc.error[1] == NearbyBeyondRange and lfLeft.len == 0:
return true
check rc.error == (0,0)
check lfLeft.len == 0
for (key,_) in tx.left high(LeafTie,root):
if key notin leftOver:
noisy.say "*** revWalkVerify", " id=", n + (nLeafs + 1) * debugID
check key in leftOver
return
if rc.value notin lfLeft:
check rc.error == (0,0)
return
if low(HashID) < rc.value.path:
lty.path = HashID(rc.value.path.u256 - 1)
lfLeft.excl rc.value
leftOver.excl key
last = key
n.inc
check n <= nLeafs
# Verify stop condition
if last.root == VertexID(0):
last = high(LeafTie,root)
elif last != low(LeafTie,root):
last = last - 1
let rc = last.left tx
if rc.isOk or rc.error[1] != NearbyBeyondRange:
check rc == WalkStopRc
return
if n != nLeafs:
check n == nLeafs
return
true
# ------------------------------------------------------------------------------
# Public test function