Aristo db update for short nodes key edge cases (#1887)

* Aristo: Provide key-value list signature calculator

detail:
  Simple wrappers around `Aristo` core functionality

* Update new API for `CoreDb`

details:
+ Renamed new API functions `contains()` => `hasKey()` or `hasPath()`,
  which disables the `in` operator on non-boolean `contains()` functions
+ The functions `get()` and `fetch()` always return a not-found error if
  there is no item available. The new functions `getOrEmpty()` and
  `mergeOrEmpty()` return an empty `Blob` if there is no such key found
  (see the sketch after this list.)
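  A toy model of these lookup semantics (deliberately not the `CoreDb`
  API itself; only the names `get()`, `getOrEmpty()` and `hasKey()` are
  taken from this change set):

    import std/[options, tables]

    type Blob = seq[byte]
    var store: Table[Blob,Blob]

    proc get(key: Blob): Option[Blob] =
      # absence is an explicit `none` case the caller must handle
      if key in store: some store[key] else: none Blob

    proc getOrEmpty(key: Blob): Blob =
      # absence silently maps to an empty `Blob`
      store.getOrDefault(key, @[])

    proc hasKey(key: Blob): bool =
      # boolean predicate, so the `in` operator works as expected
      key in store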

* Rewrite `core_apps.nim` using new API from `CoreDb`

* Use `Aristo` functionality for calculating Merkle signatures

details:
  For debugging, the `VerifyAristoForMerkleRootCalc` flag can be set so
  that `Aristo` results are verified against the legacy versions.

* Provide general interface for Merkle signing key-value tables

details:
  Export `Aristo` wrappers
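  Hedged usage sketch: the `MerkleSignRef` fields below appear in this
  change set, while the `merkleSign*` wrapper names and the import path
  are assumptions about the exported `aristo_sign` interface:

    import ./db/aristo               # assumed import path

    let sdb = merkleSignBegin()      # assumed constructor for MerkleSignRef
    sdb.merkleSignAdd(@[1.byte, 2, 3], @[4.byte, 5, 6])  # one key-value pair
    let rc = sdb.merkleSignCommit()  # assumed: yields the Merkle hash key
    if sdb.error != AristoError(0):  # `error`/`errKey` fields per this diff
      echo "signing failed at key ", sdb.errKey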

* Activate `CoreDb` tests

why:
  Now, the API seems stable enough for general tests.

* Update `toHex()` usage

why:
  Byteutils' `toHex()` is superior to `toSeq.mapIt(it.toHex(2)).join`
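  For example (byteutils yields lower case in a single call, while the
  replaced pattern produced upper case):

    import std/[sequtils, strutils], stew/byteutils

    let blob = @[byte 0xde, 0xad, 0xbe, 0xef]
    doAssert blob.toHex == "deadbeef"                    # byteutils
    doAssert blob.mapIt(it.toHex(2)).join == "DEADBEEF"  # replaced pattern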

* Split `aristo_transcode` => `aristo_serialise` + `aristo_blobify`

why:
+ Different modules for different purposes
+ `aristo_serialise`: RLP encoding/decoding
+ `aristo_blobify`: Aristo database encoding/decoding

* Compacted representation of small nodes' links instead of Keccak hashes

why:
  Ethereum MPTs use Keccak hashes as node links if the size of an RLP
  encoded node is at least 32 bytes. Otherwise, the RLP encoded node
  value is used as a pseudo node link (rather than a hash.) Such a node
  is not stored on the key-value database. Rather, the RLP encoded node
  value is stored in the parent node in place of a node link. Only the
  top level node is always referred to by its hash (the root hash.)

  This feature needed an abstraction of the `HashKey` object which is now
  either a hash or a blob of length at most 31 bytes. This leaves two
  ways of representing an empty/void `HashKey` type, either as an empty
  blob of zero length, or the hash of an empty blob.
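  Condensed from the `desc_identifiers` changes further down in this
  diff, the resulting type shape is:

    import eth/common                    # Hash256, Blob

    type
      HashKey = object
        case isHash: bool
        of true:
          key: Hash256                   # Merkle hash tacked to a vertex
        else:
          blob: Blob                     # RLP encoded node, at most 31 bytes

    func len(lid: HashKey): int =
      if lid.isHash: 32 else: lid.blob.len  # zero length doubles as void key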

* Update `CoreDb` interface (mainly reducing logger noise)

* Fix copyright years (to make `Lint` happy)
Jordan Hrycaj 2023-11-08 12:18:32 +00:00 committed by GitHub
parent 03a739ff1b
commit 4feaa2cfab
47 changed files with 2216 additions and 764 deletions

View File

@ -14,16 +14,13 @@
{.push raises: [].} {.push raises: [].}
import aristo/[ import aristo/[
aristo_constants, aristo_delete, aristo_fetch, aristo_init, aristo_constants, aristo_delete, aristo_fetch, aristo_init, aristo_merge,
aristo_merge, aristo_nearby, aristo_tx, aristo_utils, aristo_walk] aristo_nearby, aristo_serialise, aristo_sign, aristo_tx, aristo_utils,
aristo_walk]
export export
aristo_constants, aristo_delete, aristo_fetch, aristo_init, aristo_constants, aristo_delete, aristo_fetch, aristo_init, aristo_merge,
aristo_merge, aristo_nearby, aristo_tx, aristo_utils, aristo_walk aristo_nearby, aristo_serialise, aristo_sign, aristo_tx, aristo_utils,
aristo_walk
import
aristo/aristo_transcode
export
append, read, serialise
import import
aristo/aristo_get aristo/aristo_get
@ -50,6 +47,7 @@ export
AristoDbRef, AristoDbRef,
AristoError, AristoError,
AristoTxRef, AristoTxRef,
MerkleSignRef,
forget, forget,
isValid isValid

View File

@ -388,7 +388,7 @@ assumed, i.e. the list with the single vertex ID *1*.
88 +--+--+--+--+--+ .. --+ 88 +--+--+--+--+--+ .. --+
... -- more unused vertex ID ... -- more unused vertex ID
N1 +--+--+--+--+ N1 +--+--+--+--+
|| | -- flg(3) + vtxLen(29), 1st triplet || | -- flg(2) + vtxLen(30), 1st triplet
+--+--+--+--+--+ .. --+ +--+--+--+--+--+ .. --+
| | -- vertex ID of first triplet | | -- vertex ID of first triplet
+--+--+--+--+--+ .. --+--+ .. --+ +--+--+--+--+--+ .. --+--+ .. --+
@ -396,31 +396,30 @@ assumed, i.e. the list with the single vertex ID *1*.
+--+--+--+--+--+ .. --+--+ .. --+ +--+--+--+--+--+ .. --+--+ .. --+
... -- optional vertex record ... -- optional vertex record
N2 +--+--+--+--+ N2 +--+--+--+--+
|| | -- flg(3) + vtxLen(29), 2nd triplet || | -- flg(2) + vtxLen(30), 2nd triplet
+--+--+--+--+ +--+--+--+--+
... ...
+--+ +--+
| | -- marker(8), 0x7d | | -- marker(8), 0x7d
+--+ +--+
where where
+ minimum size of an empty filer is 72 bytes + minimum size of an empty filter is 72 bytes
+ the flg(3) represents the tuple (key-mode,vertex-mode) encoding + the flg(2) represents a bit tuple encoding the serialised storage
the serialised storage states modes for the optional 32 bytes hash key:
0 -- encoded and present 0 -- not encoded, to be ignored
1 -- not encoded, void => considered deleted 1 -- not encoded, void => considered deleted
2 -- not encoded, to be ignored 2 -- present, encoded as-is (32 bytes)
3 -- present, encoded as (len(1),data,zero-padding)
so, when encoded as + the vtxLen(30) is the number of bytes of the optional vertex record
which has maximum size 2^30-2 which is short of 1 GiB. The value
2^30-1 (i.e. 0x3fffffff) is reserved for indicating that there is
no vertex record following and it should be considered deleted.
flg(3) = key-mode * 3 + vertex-mode + there is no blind entry, i.e. either flg(2) != 0 or vtxLen(30) != 0.
the the tuple (2,2) will never occur and flg(3) < 9
+ the vtxLen(29) is the number of bytes of the optional vertex record
which has maximum size 2^29-1 which is short of 512 MiB
+ the marker(8) is the eight bit array *0111-1101* + the marker(8) is the eight bit array *0111-1101*
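To make the bit packing concrete, the prefix-word arithmetic implied
above looks as follows (mirroring the `keyMode or vtxLen` expressions in
the `aristo_blobify` hunks further down; values are illustrative):

    let
      keyMode = 0xc000_0000u32      # flg(2) = 3: (len(1),data,zero-padding)
      vtxLen  = 123u32              # vertex record size, < 0x3fff_ffff
      pfx     = keyMode or vtxLen   # 32 bit triplet prefix, big endian on disk
    doAssert pfx shr 30 == 3 and (pfx and 0x3fff_ffff) == 123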

View File

@ -12,26 +12,15 @@
import import
std/[bitops, sequtils, sets], std/[bitops, sequtils, sets],
eth/[common, rlp, trie/nibbles], eth/[common, trie/nibbles],
results, results,
stew/endians2, stew/endians2,
"."/[aristo_constants, aristo_desc, aristo_get] ./aristo_desc
# Annotation helper
{.pragma: noRaise, gcsafe, raises: [].}
type
ResolveVidFn = proc(vid: VertexID): Result[HashKey,AristoError] {.noRaise.}
## Resolve storage root vertex ID
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Private helper # Private helper
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc aristoError(error: AristoError): NodeRef =
## Allows returning de
NodeRef(vType: Leaf, error: error)
proc load64(data: Blob; start: var int): Result[uint64,AristoError] = proc load64(data: Blob; start: var int): Result[uint64,AristoError] =
if data.len < start + 9: if data.len < start + 9:
return err(DeblobPayloadTooShortInt64) return err(DeblobPayloadTooShortInt64)
@ -46,154 +35,6 @@ proc load256(data: Blob; start: var int): Result[UInt256,AristoError] =
start += 32 start += 32
ok val ok val
proc serialise(
pyl: PayloadRef;
getKey: ResolveVidFn;
): Result[Blob,(VertexID,AristoError)] =
## Encode the data payload of the argument `pyl` as RLP `Blob` if it is of
## account type, otherwise pass the data as is.
##
case pyl.pType:
of RawData:
ok pyl.rawBlob
of RlpData:
ok pyl.rlpBlob
of AccountData:
let
vid = pyl.account.storageID
key = block:
if not vid.isValid:
VOID_HASH_KEY
else:
let rc = vid.getKey
if rc.isErr:
return err((vid,rc.error))
rc.value
ok rlp.encode Account(
nonce: pyl.account.nonce,
balance: pyl.account.balance,
storageRoot: key.to(Hash256),
codeHash: pyl.account.codeHash)
# ------------------------------------------------------------------------------
# Public RLP transcoder mixins
# ------------------------------------------------------------------------------
proc read*(rlp: var Rlp; T: type NodeRef): T {.gcsafe, raises: [RlpError].} =
## Mixin for RLP writer, see `fromRlpRecord()` for an encoder with detailed
## error return code (if needed.) This reader is a jazzed up version which
## reports some particular errors in the `Dummy` type node.
if not rlp.isList:
# Otherwise `rlp.items` would raise a `Defect`
return aristoError(Rlp2Or17ListEntries)
var
blobs = newSeq[Blob](2) # temporary, cache
links: array[16,HashKey] # reconstruct branch node
top = 0 # count entries and positions
# Collect lists of either 2 or 17 blob entries.
for w in rlp.items:
case top
of 0, 1:
if not w.isBlob:
return aristoError(RlpBlobExpected)
blobs[top] = rlp.read(Blob)
of 2 .. 15:
if not links[top].init(rlp.read(Blob)):
return aristoError(RlpBranchLinkExpected)
of 16:
if not w.isBlob:
return aristoError(RlpBlobExpected)
if 0 < rlp.read(Blob).len:
return aristoError(RlpEmptyBlobExpected)
else:
return aristoError(Rlp2Or17ListEntries)
top.inc
# Verify extension data
case top
of 2:
if blobs[0].len == 0:
return aristoError(RlpNonEmptyBlobExpected)
let (isLeaf, pathSegment) = hexPrefixDecode blobs[0]
if isLeaf:
return NodeRef(
vType: Leaf,
lPfx: pathSegment,
lData: PayloadRef(
pType: RawData,
rawBlob: blobs[1]))
else:
var node = NodeRef(
vType: Extension,
ePfx: pathSegment)
if not node.key[0].init(blobs[1]):
return aristoError(RlpExtPathEncoding)
return node
of 17:
for n in [0,1]:
if not links[n].init(blobs[n]):
return aristoError(RlpBranchLinkExpected)
return NodeRef(
vType: Branch,
key: links)
else:
discard
aristoError(Rlp2Or17ListEntries)
proc append*(writer: var RlpWriter; node: NodeRef) =
## Mixin for RLP writer. Note that a `Dummy` node is encoded as an empty
## list.
proc addHashKey(writer: var RlpWriter; key: HashKey) =
if not key.isValid:
writer.append EmptyBlob
else:
writer.append key.to(Hash256)
if node.error != AristoError(0):
writer.startList(0)
else:
case node.vType:
of Branch:
writer.startList(17)
for n in 0..15:
writer.addHashKey node.key[n]
writer.append EmptyBlob
of Extension:
writer.startList(2)
writer.append node.ePfx.hexPrefixEncode(isleaf = false)
writer.addHashKey node.key[0]
of Leaf:
proc getKey0(vid: VertexID): Result[HashKey,AristoError] {.noRaise.} =
ok(node.key[0]) # always succeeds
writer.startList(2)
writer.append node.lPfx.hexPrefixEncode(isleaf = true)
writer.append node.lData.serialise(getKey0).value
# ---------------------
proc to*(node: NodeRef; T: type HashKey): T =
## Convert the argument `node` to the corresponding Merkle hash key
node.encode.digestTo T
proc serialise*(
db: AristoDbRef;
pyl: PayloadRef;
): Result[Blob,(VertexID,AristoError)] =
## Encode the data payload of the argument `pyl` as RLP `Blob` if it is of
## account type, otherwise pass the data as is.
##
proc getKey(vid: VertexID): Result[HashKey,AristoError] =
db.getKeyRc(vid)
pyl.serialise getKey
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Public functions # Public functions
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
@ -331,12 +172,16 @@ proc blobify*(filter: FilterRef; data: var Blob): Result[void,AristoError] =
## ... -- more triplets ## ... -- more triplets
## 0x7d -- marker(8) ## 0x7d -- marker(8)
## ##
func blobify(lid: HashKey): Blob =
let n = lid.len
if n < 32: @[n.byte] & @lid & 0u8.repeat(31 - n) else: @lid
if not filter.isValid: if not filter.isValid:
return err(BlobifyNilFilter) return err(BlobifyNilFilter)
data.setLen(0) data.setLen(0)
data &= filter.fid.uint64.toBytesBE.toSeq data &= filter.fid.uint64.toBytesBE.toSeq
data &= filter.src.ByteArray32.toSeq data &= @(filter.src.data)
data &= filter.trg.ByteArray32.toSeq data &= @(filter.trg.data)
data &= filter.vGen.len.uint32.toBytesBE.toSeq data &= filter.vGen.len.uint32.toBytesBE.toSeq
data &= newSeq[byte](4) # place holder data &= newSeq[byte](4) # place holder
@ -355,30 +200,28 @@ proc blobify*(filter: FilterRef; data: var Blob): Result[void,AristoError] =
leftOver.excl vid leftOver.excl vid
var var
keyMode = 0u # present and usable keyMode = 0u # default: ignore that key
vtxMode = 0u # present and usable vtxLen = 0u # default: ignore that vertex
keyBlob: Blob keyBlob: Blob
vtxBlob: Blob vtxBlob: Blob
let key = filter.kMap.getOrVoid vid let key = filter.kMap.getOrVoid vid
if key.isValid: if key.isValid:
keyBlob = key.ByteArray32.toSeq keyBlob = key.blobify
keyMode = if key.len < 32: 0xc000_0000u else: 0x8000_0000u
elif filter.kMap.hasKey vid: elif filter.kMap.hasKey vid:
keyMode = 1u # void hash key => considered deleted keyMode = 0x4000_0000u # void hash key => considered deleted
else:
keyMode = 2u # ignore that hash key
if vtx.isValid: if vtx.isValid:
? vtx.blobify vtxBlob ? vtx.blobify vtxBlob
vtxLen = vtxBlob.len.uint
if 0x3fff_ffff <= vtxLen:
return err(BlobifyFilterRecordOverflow)
else: else:
vtxMode = 1u # nil vertex => considered deleted vtxLen = 0x3fff_ffff # nil vertex => considered deleted
if (vtxBlob.len and not 0x1fffffff) != 0:
return err(BlobifyFilterRecordOverflow)
let pfx = ((keyMode * 3 + vtxMode) shl 29) or vtxBlob.len.uint
data &= data &=
pfx.uint32.toBytesBE.toSeq & (keyMode or vtxLen).uint32.toBytesBE.toSeq &
vid.uint64.toBytesBE.toSeq & vid.uint64.toBytesBE.toSeq &
keyBlob & keyBlob &
vtxBlob vtxBlob
@ -387,18 +230,18 @@ proc blobify*(filter: FilterRef; data: var Blob): Result[void,AristoError] =
for vid in leftOver: for vid in leftOver:
n.inc n.inc
var var
mode = 2u # key present and usable, ignore vtx keyMode = 0u # present and usable
keyBlob: Blob keyBlob: Blob
let key = filter.kMap.getOrVoid vid let key = filter.kMap.getOrVoid vid
if key.isValid: if key.isValid:
keyBlob = key.ByteArray32.toSeq keyBlob = key.blobify
keyMode = if key.len < 32: 0xc000_0000u else: 0x8000_0000u
else: else:
mode = 5u # 1 * 3 + 2: void key, ignore vtx keyMode = 0x4000_0000u # void hash key => considered deleted
let pfx = (mode shl 29)
data &= data &=
pfx.uint32.toBytesBE.toSeq & keyMode.uint32.toBytesBE.toSeq &
vid.uint64.toBytesBE.toSeq & vid.uint64.toBytesBE.toSeq &
keyBlob keyBlob
@ -491,7 +334,7 @@ proc deblobify*(record: Blob; vtx: var VertexRef): Result[void,AristoError] =
## De-serialise a data record encoded with `blobify()`. The second ## De-serialise a data record encoded with `blobify()`. The second
## argument `vtx` can be `nil`. ## argument `vtx` can be `nil`.
if record.len < 3: # minimum `Leaf` record if record.len < 3: # minimum `Leaf` record
return err(DeblobTooShort) return err(DeblobVtxTooShort)
case record[^1] shr 6: case record[^1] shr 6:
of 0: # `Branch` vertex of 0: # `Branch` vertex
@ -593,10 +436,16 @@ proc deblobify*(data: Blob; filter: var FilterRef): Result[void,AristoError] =
if data[^1] != 0x7d: if data[^1] != 0x7d:
return err(DeblobWrongType) return err(DeblobWrongType)
func deblob(data: openArray[byte]; shortKey: bool): Result[HashKey,void] =
if shortKey:
HashKey.fromBytes data[1 .. min(data[0],31)]
else:
HashKey.fromBytes data
let f = FilterRef() let f = FilterRef()
f.fid = (uint64.fromBytesBE data[0 ..< 8]).FilterID f.fid = (uint64.fromBytesBE data[0 ..< 8]).FilterID
(addr f.src.ByteArray32[0]).copyMem(unsafeAddr data[8], 32) (addr f.src.data[0]).copyMem(unsafeAddr data[8], 32)
(addr f.trg.ByteArray32[0]).copyMem(unsafeAddr data[40], 32) (addr f.trg.data[0]).copyMem(unsafeAddr data[40], 32)
let let
nVids = uint32.fromBytesBE data[72 ..< 76] nVids = uint32.fromBytesBE data[72 ..< 76]
@ -615,33 +464,33 @@ proc deblobify*(data: Blob; filter: var FilterRef): Result[void,AristoError] =
return err(DeblobFilterTrpTooShort) return err(DeblobFilterTrpTooShort)
let let
flag = data[offs] shr 5 # double triplets: {0,1,2} x {0,1,2} keyFlag = data[offs] shr 6
vLen = ((uint32.fromBytesBE data[offs ..< offs + 4]) and 0x1fffffff).int vtxFlag = ((uint32.fromBytesBE data[offs ..< offs+4]) and 0x3fff_ffff).int
if (vLen == 0) != ((flag mod 3) > 0): vLen = if vtxFlag == 0x3fff_ffff: 0 else: vtxFlag
return err(DeblobFilterTrpVtxSizeGarbled) # contadiction if keyFlag == 0 and vtxFlag == 0:
return err(DeblobFilterTrpVtxSizeGarbled) # no blind records
offs = offs + 4 offs = offs + 4
let vid = (uint64.fromBytesBE data[offs ..< offs + 8]).VertexID let vid = (uint64.fromBytesBE data[offs ..< offs + 8]).VertexID
offs = offs + 8 offs = offs + 8
if data.len < offs + (flag < 3).ord * 32 + vLen: if data.len < offs + (1 < keyFlag).ord * 32 + vLen:
return err(DeblobFilterTrpTooShort) return err(DeblobFilterTrpTooShort)
if flag < 3: # {0} x {0,1,2} if 1 < keyFlag:
var key: HashKey f.kMap[vid] = data[offs ..< offs + 32].deblob(keyFlag == 3).valueOr:
(addr key.ByteArray32[0]).copyMem(unsafeAddr data[offs], 32) return err(DeblobHashKeyExpected)
f.kMap[vid] = key
offs = offs + 32 offs = offs + 32
elif flag < 6: # {0,1} x {0,1,2} elif keyFlag == 1:
f.kMap[vid] = VOID_HASH_KEY f.kMap[vid] = VOID_HASH_KEY
if 0 < vLen: if vtxFlag == 0x3fff_ffff:
f.sTab[vid] = VertexRef(nil)
elif 0 < vLen:
var vtx: VertexRef var vtx: VertexRef
? data[offs ..< offs + vLen].deblobify vtx ? data[offs ..< offs + vLen].deblobify vtx
f.sTab[vid] = vtx f.sTab[vid] = vtx
offs = offs + vLen offs = offs + vLen
elif (flag mod 3) == 1: # {0,1,2} x {1}
f.sTab[vid] = VertexRef(nil)
if data.len != offs + 1: if data.len != offs + 1:
return err(DeblobFilterSizeGarbled) return err(DeblobFilterSizeGarbled)

View File

@ -12,11 +12,11 @@
import import
std/[algorithm, sequtils, sets, tables], std/[algorithm, sequtils, sets, tables],
eth/common, eth/[common, trie/nibbles],
stew/interval_set, stew/interval_set,
../../aristo, ../../aristo,
../aristo_walk/persistent, ../aristo_walk/persistent,
".."/[aristo_desc, aristo_get, aristo_vid, aristo_transcode] ".."/[aristo_desc, aristo_get, aristo_vid]
const const
Vid2 = @[VertexID(2)].toHashSet Vid2 = @[VertexID(2)].toHashSet
@ -98,6 +98,21 @@ proc checkBE*[T: RdbBackendRef|MemBackendRef|VoidBackendRef](
let rc = db.getKeyBE vid let rc = db.getKeyBE vid
if rc.isErr or not rc.value.isValid: if rc.isErr or not rc.value.isValid:
return err((vid,CheckBeKeyMissing)) return err((vid,CheckBeKeyMissing))
case vtx.vType:
of Leaf:
discard
of Branch:
block check42Links:
var seen = false
for n in 0 .. 15:
if vtx.bVid[n].isValid:
if seen:
break check42Links
seen = true
return err((vid,CheckBeVtxBranchLinksMissing))
of Extension:
if vtx.ePfx.len == 0:
return err((vid,CheckBeVtxExtPfxMissing))
for (_,vid,key) in T.walkKeyBE db: for (_,vid,key) in T.walkKeyBE db:
if not key.isvalid: if not key.isvalid:
@ -109,7 +124,7 @@ proc checkBE*[T: RdbBackendRef|MemBackendRef|VoidBackendRef](
if rx.isErr: if rx.isErr:
return err((vid,CheckBeKeyCantCompile)) return err((vid,CheckBeKeyCantCompile))
if not relax: if not relax:
let expected = rx.value.to(HashKey) let expected = rx.value.digestTo(HashKey)
if expected != key: if expected != key:
return err((vid,CheckBeKeyMismatch)) return err((vid,CheckBeKeyMismatch))
discard vids.reduce Interval[VertexID,uint64].new(vid,vid) discard vids.reduce Interval[VertexID,uint64].new(vid,vid)
@ -162,10 +177,11 @@ proc checkBE*[T: RdbBackendRef|MemBackendRef|VoidBackendRef](
not db.backend.isNil and not db.backend.isNil and
not db.backend.filters.isNil: not db.backend.filters.isNil:
var lastTrg = db.getKeyUBE(VertexID(1)).get(otherwise = VOID_HASH_KEY) var lastTrg = db.getKeyUBE(VertexID(1)).get(otherwise = VOID_HASH_KEY)
.to(Hash256)
for (qid,filter) in db.backend.T.walkFifoBe: # walk in fifo order for (qid,filter) in db.backend.T.walkFifoBe: # walk in fifo order
if filter.src != lastTrg: if filter.src != lastTrg:
return err((VertexID(0),CheckBeFifoSrcTrgMismatch)) return err((VertexID(0),CheckBeFifoSrcTrgMismatch))
if filter.trg != filter.kMap.getOrVoid VertexID(1): if filter.trg != filter.kMap.getOrVoid(VertexID 1).to(Hash256):
return err((VertexID(1),CheckBeFifoTrgNotStateRoot)) return err((VertexID(1),CheckBeFifoTrgNotStateRoot))
lastTrg = filter.trg lastTrg = filter.trg
@ -180,7 +196,7 @@ proc checkBE*[T: RdbBackendRef|MemBackendRef|VoidBackendRef](
let rc = vtx.toNode db # compile cache first let rc = vtx.toNode db # compile cache first
if rc.isErr: if rc.isErr:
return err((vid,CheckBeCacheKeyCantCompile)) return err((vid,CheckBeCacheKeyCantCompile))
let expected = rc.value.to(HashKey) let expected = rc.value.digestTo(HashKey)
if expected != lbl.key: if expected != lbl.key:
return err((vid,CheckBeCacheKeyMismatch)) return err((vid,CheckBeCacheKeyMismatch))
@ -192,7 +208,10 @@ proc checkBE*[T: RdbBackendRef|MemBackendRef|VoidBackendRef](
if 0 < delta.len: if 0 < delta.len:
# Exclude fringe case when there is a single root vertex only # Exclude fringe case when there is a single root vertex only
if vGenExpected != Vid2 or 0 < vGen.len: if vGenExpected != Vid2 or 0 < vGen.len:
return err((delta.toSeq.sorted[^1],CheckBeCacheGarbledVGen)) let delta = delta.toSeq
# As happens with Merkle signature calculator: `root=VertexID(2)`
if delta.len != 1 or delta[0] != VertexID(1) or VertexID(1) in vGen:
return err((delta.sorted[^1],CheckBeCacheGarbledVGen))
ok() ok()

View File

@ -12,9 +12,9 @@
import import
std/[sequtils, sets, tables], std/[sequtils, sets, tables],
eth/common, eth/[common, trie/nibbles],
results, results,
".."/[aristo_desc, aristo_get, aristo_transcode, aristo_utils] ".."/[aristo_desc, aristo_get, aristo_serialise, aristo_utils]
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Public functions # Public functions
@ -32,16 +32,17 @@ proc checkTopStrict*(
let lbl = db.top.kMap.getOrVoid vid let lbl = db.top.kMap.getOrVoid vid
if not lbl.isValid: if not lbl.isValid:
return err((vid,CheckStkVtxKeyMissing)) return err((vid,CheckStkVtxKeyMissing))
if lbl.key != rc.value.to(HashKey): if lbl.key != rc.value.digestTo(HashKey):
return err((vid,CheckStkVtxKeyMismatch)) return err((vid,CheckStkVtxKeyMismatch))
let revVid = db.top.pAmk.getOrVoid lbl let revVids = db.top.pAmk.getOrVoid lbl
if not revVid.isValid: if not revVids.isValid:
return err((vid,CheckStkRevKeyMissing)) return err((vid,CheckStkRevKeyMissing))
if revVid != vid: if vid notin revVids:
return err((vid,CheckStkRevKeyMismatch)) return err((vid,CheckStkRevKeyMismatch))
if 0 < db.top.pAmk.len and db.top.pAmk.len < db.top.sTab.len: let pAmkVtxCount = db.top.pAmk.values.toSeq.foldl(a + b.len, 0)
if 0 < pAmkVtxCount and pAmkVtxCount < db.top.sTab.len:
# Cannot have less changes than cached entries # Cannot have less changes than cached entries
return err((VertexID(0),CheckStkVtxCountMismatch)) return err((VertexID(0),CheckStkVtxCountMismatch))
@ -62,13 +63,13 @@ proc checkTopRelaxed*(
let lbl = db.top.kMap.getOrVoid vid let lbl = db.top.kMap.getOrVoid vid
if not lbl.isValid: if not lbl.isValid:
return err((vid,CheckRlxVtxKeyMissing)) return err((vid,CheckRlxVtxKeyMissing))
if lbl.key != rc.value.to(HashKey): if lbl.key != rc.value.digestTo(HashKey):
return err((vid,CheckRlxVtxKeyMismatch)) return err((vid,CheckRlxVtxKeyMismatch))
let revVid = db.top.pAmk.getOrVoid lbl let revVids = db.top.pAmk.getOrVoid lbl
if not revVid.isValid: if not revVids.isValid:
return err((vid,CheckRlxRevKeyMissing)) return err((vid,CheckRlxRevKeyMissing))
if revVid != vid: if vid notin revVids:
return err((vid,CheckRlxRevKeyMismatch)) return err((vid,CheckRlxRevKeyMismatch))
else: else:
for (vid,lbl) in db.top.kMap.pairs: for (vid,lbl) in db.top.kMap.pairs:
@ -77,15 +78,13 @@ proc checkTopRelaxed*(
if vtx.isValid: if vtx.isValid:
let rc = vtx.toNode db let rc = vtx.toNode db
if rc.isOk: if rc.isOk:
if lbl.key != rc.value.to(HashKey): if lbl.key != rc.value.digestTo(HashKey):
return err((vid,CheckRlxVtxKeyMismatch)) return err((vid,CheckRlxVtxKeyMismatch))
let revVid = db.top.pAmk.getOrVoid lbl let revVids = db.top.pAmk.getOrVoid lbl
if not revVid.isValid: if not revVids.isValid:
return err((vid,CheckRlxRevKeyMissing)) return err((vid,CheckRlxRevKeyMissing))
if revVid != vid: if vid notin revVids:
return err((vid,CheckRlxRevKeyMissing))
if revVid != vid:
return err((vid,CheckRlxRevKeyMismatch)) return err((vid,CheckRlxRevKeyMismatch))
ok() ok()
@ -101,7 +100,23 @@ proc checkTopCommon*(
# Check deleted entries # Check deleted entries
var nNilVtx = 0 var nNilVtx = 0
for (vid,vtx) in db.top.sTab.pairs: for (vid,vtx) in db.top.sTab.pairs:
if not vtx.isValid: if vtx.isValid:
case vtx.vType:
of Leaf:
discard
of Branch:
block check42Links:
var seen = false
for n in 0 .. 15:
if vtx.bVid[n].isValid:
if seen:
break check42Links
seen = true
return err((vid,CheckAnyVtxBranchLinksMissing))
of Extension:
if vtx.ePfx.len == 0:
return err((vid,CheckAnyVtxExtPfxMissing))
else:
nNilVtx.inc nNilVtx.inc
let rc = db.getVtxBE vid let rc = db.getVtxBE vid
if rc.isErr: if rc.isErr:
@ -116,14 +131,16 @@ proc checkTopCommon*(
if kMapNilCount != 0 and kMapNilCount < nNilVtx: if kMapNilCount != 0 and kMapNilCount < nNilVtx:
return err((VertexID(0),CheckAnyVtxEmptyKeyMismatch)) return err((VertexID(0),CheckAnyVtxEmptyKeyMismatch))
if db.top.pAmk.len != kMapCount: let pAmkVtxCount = db.top.pAmk.values.toSeq.foldl(a + b.len, 0)
if pAmkVtxCount != kMapCount:
var knownKeys: HashSet[VertexID] var knownKeys: HashSet[VertexID]
for (key,vid) in db.top.pAmk.pairs: for (key,vids) in db.top.pAmk.pairs:
if not db.top.kMap.hasKey(vid): for vid in vids:
return err((vid,CheckAnyRevVtxMissing)) if not db.top.kMap.hasKey(vid):
if vid in knownKeys: return err((vid,CheckAnyRevVtxMissing))
return err((vid,CheckAnyRevVtxDup)) if vid in knownKeys:
knownKeys.incl vid return err((vid,CheckAnyRevVtxDup))
knownKeys.incl vid
return err((VertexID(0),CheckAnyRevCountMismatch)) # should not apply(!) return err((VertexID(0),CheckAnyRevCountMismatch)) # should not apply(!)
for vid in db.top.pPrf: for vid in db.top.pPrf:

View File

@ -11,6 +11,7 @@
{.push raises: [].} {.push raises: [].}
import import
std/sets,
eth/[common, trie/nibbles], eth/[common, trie/nibbles],
./aristo_desc/desc_identifiers ./aristo_desc/desc_identifiers
@ -24,18 +25,21 @@ const
EmptyVidSeq* = seq[VertexID].default EmptyVidSeq* = seq[VertexID].default
## Useful shortcut ## Useful shortcut
EmptyQidPairSeq* = seq[(QueueID,QueueID)].default EmptyVidSet* = EmptyVidSeq.toHashSet
## Useful shortcut ## Useful shortcut
VOID_CODE_HASH* = EMPTY_CODE_HASH VOID_CODE_HASH* = EMPTY_CODE_HASH
## Equivalent of `nil` for `Account` object code hash ## Equivalent of `nil` for `Account` object code hash
VOID_HASH_KEY* = EMPTY_ROOT_HASH.to(HashKey) VOID_HASH_KEY* = HashKey()
## Void equivalent for Merkle hash value ## Void equivalent for Merkle hash value
VOID_HASH_LABEL* = HashLabel(root: VertexID(0), key: VOID_HASH_KEY) VOID_HASH_LABEL* = HashLabel()
## Void equivalent for Merkle hash value ## Void equivalent for Merkle hash value
EmptyQidPairSeq* = seq[(QueueID,QueueID)].default
## Useful shortcut
DEFAULT_QID_QUEUES* = [ DEFAULT_QID_QUEUES* = [
(128, 0), ## Consecutive list of 128 filter slots (128, 0), ## Consecutive list of 128 filter slots
( 64, 63), ## Overflow list, 64 filters, skipping 63 filters in-between ( 64, 63), ## Overflow list, 64 filters, skipping 63 filters in-between

View File

@ -25,10 +25,7 @@ import
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc toHex(w: VertexID): string = proc toHex(w: VertexID): string =
w.uint64.toHex.toLowerAscii w.uint64.toHex
proc toHex(w: HashKey): string =
w.ByteArray32.toHex.toLowerAscii
proc toHexLsb(w: int8): string = proc toHexLsb(w: int8): string =
$"0123456789abcdef"[w and 15] $"0123456789abcdef"[w and 15]
@ -51,21 +48,27 @@ proc sortedKeys(pPrf: HashSet[VertexID]): seq[VertexID] =
proc toPfx(indent: int; offset = 0): string = proc toPfx(indent: int; offset = 0): string =
if 0 < indent+offset: "\n" & " ".repeat(indent+offset) else: "" if 0 < indent+offset: "\n" & " ".repeat(indent+offset) else: ""
proc labelVidUpdate(db: AristoDbRef, lbl: HashLabel, vid: VertexID): string = proc lidVidUpdate(
if lbl.key.isValid and vid.isValid: db: AristoDbRef;
root: VertexID;
lid: HashKey;
vid: VertexID;
): string =
if lid.isValid and vid.isValid:
let lbl = HashLabel(root: root, key: lid)
if not db.top.isNil: if not db.top.isNil:
let lblVid = db.top.pAmk.getOrVoid lbl let vids = db.top.pAmk.getOrVoid lbl
if lblVid.isValid: if vids.isValid:
if lblVid != vid: if vid notin vids:
result = "(!)" result = "(!)"
return return
block: block:
let lblVid = db.xMap.getOrVoid lbl let vids = db.xMap.getOrVoid lbl
if lblVid.isValid: if vids.isValid:
if lblVid != vid: if vid notin vids:
result = "(!)" result = "(!)"
return return
db.xMap[lbl] = vid db.xMap.append(lbl, vid)
proc squeeze(s: string; hex = false; ignLen = false): string = proc squeeze(s: string; hex = false; ignLen = false): string =
## For long strings print `begin..end` only ## For long strings print `begin..end` only
@ -83,7 +86,7 @@ proc squeeze(s: string; hex = false; ignLen = false): string =
result &= ".." & s[s.len-16 .. ^1] result &= ".." & s[s.len-16 .. ^1]
proc stripZeros(a: string): string = proc stripZeros(a: string): string =
a.strip(leading=true, trailing=false, chars={'0'}).toLowerAscii a.strip(leading=true, trailing=false, chars={'0'})
proc ppVid(vid: VertexID; pfx = true): string = proc ppVid(vid: VertexID; pfx = true): string =
if pfx: if pfx:
@ -119,55 +122,61 @@ proc ppQid(qid: QueueID): string =
else: else:
break here break here
return return
result &= qid.toHex.stripZeros.toLowerAscii result &= qid.toHex.stripZeros
proc ppVidList(vGen: openArray[VertexID]): string = proc ppVidList(vGen: openArray[VertexID]): string =
"[" & vGen.mapIt(it.ppVid).join(",") & "]" "[" & vGen.mapIt(it.ppVid).join(",") & "]"
proc ppVidList(vGen: HashSet[VertexID]): string =
"{" & vGen.sortedKeys.mapIt(it.ppVid).join(",") & "}"
proc vidCode(lbl: HashLabel, db: AristoDbRef): uint64 = proc vidCode(lbl: HashLabel, db: AristoDbRef): uint64 =
if lbl.isValid: if lbl.isValid:
if not db.top.isNil: if not db.top.isNil:
let vid = db.top.pAmk.getOrVoid lbl let vids = db.top.pAmk.getOrVoid lbl
if vid.isValid: if vids.isValid:
return vid.uint64 return vids.sortedKeys[0].uint64
block: block:
let vid = db.xMap.getOrVoid lbl let vids = db.xMap.getOrVoid lbl
if vid.isValid: if vids.isValid:
return vid.uint64 return vids.sortedKeys[0].uint64
proc ppKey(key: HashKey): string = proc ppKey(key: HashKey; db: AristoDbRef; root: VertexID; pfx = true): string =
if key == HashKey.default: proc getVids: HashSet[VertexID] =
return "£ø" if not db.top.isNil:
let vids = db.top.pAmk.getOrVoid HashLabel(root: root, key: key)
if vids.isValid:
return vids
block:
let vids = db.xMap.getOrVoid HashLabel(root: root, key: key)
if vids.isValid:
return vids
if pfx:
result = "£"
if key == VOID_HASH_KEY: if key == VOID_HASH_KEY:
return "£r" result &= "ø"
elif not key.isValid:
"%" & key.toHex.squeeze(hex=true,ignLen=true) result &= "r"
else:
let
tag = if key.len < 32: "[#" & $key.len & "]" else: ""
vids = getVids()
if vids.isValid:
if not pfx and 0 < tag.len:
result &= "$"
if 1 < vids.len: result &= "{"
result &= vids.sortedKeys.mapIt(it.ppVid(pfx=false)).join(",")
if 1 < vids.len: result &= "}"
result &= tag
return
result &= @key.toHex.squeeze(hex=true,ignLen=true) & tag
proc ppLabel(lbl: HashLabel; db: AristoDbRef): string = proc ppLabel(lbl: HashLabel; db: AristoDbRef): string =
if lbl.key == HashKey.default: if lbl.isValid:
return "£ø" "%" & ($lbl.root.toHex).stripZeros &
if lbl.key == VOID_HASH_KEY: ":" & lbl.key.ppKey(db, lbl.root, pfx=false)
return "£r" else:
""
let rid = if not lbl.root.isValid: "ø:"
else: ($lbl.root.toHex).stripZeros & ":"
if not db.top.isNil:
let vid = db.top.pAmk.getOrVoid lbl
if vid.isValid:
return "£" & rid & vid.ppVid(pfx=false)
block:
let vid = db.xMap.getOrVoid lbl
if vid.isValid:
return "£" & rid & vid.ppVid(pfx=false)
"%" & rid & lbl.key.toHex.squeeze(hex=true,ignLen=true)
proc ppRootKey(a: HashKey): string =
if a.isValid:
return a.ppKey
proc ppCodeKey(a: HashKey): string =
a.ppKey
proc ppLeafTie(lty: LeafTie, db: AristoDbRef): string = proc ppLeafTie(lty: LeafTie, db: AristoDbRef): string =
if not db.top.isNil: if not db.top.isNil:
@ -191,13 +200,13 @@ proc ppPayload(p: PayloadRef, db: AristoDbRef): string =
of RawData: of RawData:
result &= p.rawBlob.toHex.squeeze(hex=true) result &= p.rawBlob.toHex.squeeze(hex=true)
of RlpData: of RlpData:
result &= "(" & p.rlpBlob.toHex.squeeze(hex=true) & ")" result &= "[#" & p.rlpBlob.toHex.squeeze(hex=true) & "]"
of AccountData: of AccountData:
result = "(" result = "("
result &= $p.account.nonce & "," result &= $p.account.nonce & ","
result &= $p.account.balance & "," result &= $p.account.balance & ","
result &= p.account.storageID.ppVid & "," result &= p.account.storageID.ppVid & ","
result &= p.account.codeHash.to(HashKey).ppCodeKey() & ")" result &= $p.account.codeHash & ")"
proc ppVtx(nd: VertexRef, db: AristoDbRef, vid: VertexID): string = proc ppVtx(nd: VertexRef, db: AristoDbRef, vid: VertexID): string =
if not nd.isValid: if not nd.isValid:
@ -230,7 +239,7 @@ proc ppSTab(
"{" & sTab.sortedKeys "{" & sTab.sortedKeys
.mapIt((it, sTab.getOrVoid it)) .mapIt((it, sTab.getOrVoid it))
.mapIt("(" & it[0].ppVid & "," & it[1].ppVtx(db,it[0]) & ")") .mapIt("(" & it[0].ppVid & "," & it[1].ppVtx(db,it[0]) & ")")
.join(indent.toPfx(2)) & "}" .join(indent.toPfx(1)) & "}"
proc ppLTab( proc ppLTab(
lTab: Table[LeafTie,VertexID]; lTab: Table[LeafTie,VertexID];
@ -240,7 +249,7 @@ proc ppLTab(
"{" & lTab.sortedKeys "{" & lTab.sortedKeys
.mapIt((it, lTab.getOrVoid it)) .mapIt((it, lTab.getOrVoid it))
.mapIt("(" & it[0].ppLeafTie(db) & "," & it[1].ppVid & ")") .mapIt("(" & it[0].ppLeafTie(db) & "," & it[1].ppVid & ")")
.join(indent.toPfx(2)) & "}" .join(indent.toPfx(1)) & "}"
proc ppPPrf(pPrf: HashSet[VertexID]): string = proc ppPPrf(pPrf: HashSet[VertexID]): string =
"{" & pPrf.sortedKeys.mapIt(it.ppVid).join(",") & "}" "{" & pPrf.sortedKeys.mapIt(it.ppVid).join(",") & "}"
@ -248,31 +257,35 @@ proc ppPPrf(pPrf: HashSet[VertexID]): string =
proc ppXMap*( proc ppXMap*(
db: AristoDbRef; db: AristoDbRef;
kMap: Table[VertexID,HashLabel]; kMap: Table[VertexID,HashLabel];
pAmk: Table[HashLabel,VertexID]; pAmk: Table[HashLabel,HashSet[VertexID]];
indent: int; indent: int;
): string = ): string =
let let pfx = indent.toPfx(1)
pfx = indent.toPfx(1)
dups = pAmk.values.toSeq.toCountTable.pairs.toSeq var dups: HashSet[VertexID]
.filterIt(1 < it[1]).toTable for vids in pAmk.values:
revOnly = pAmk.pairs.toSeq.filterIt(not kMap.hasKey it[1]) if 1 < vids.len:
.mapIt((it[1],it[0])).toTable dups = dups + vids
# Vertex IDs without forward mapping `kMap: VertexID -> HashLabel`
var revOnly: Table[VertexID,HashLabel]
for (lbl,vids) in pAmk.pairs:
for vid in vids:
if not kMap.hasKey vid:
revOnly[vid] = lbl
let revKeys =
revOnly.keys.toSeq.mapIt(it.uint64).sorted.mapIt(it.VertexID)
proc ppNtry(n: uint64): string = proc ppNtry(n: uint64): string =
var s = VertexID(n).ppVid var s = VertexID(n).ppVid
let lbl = kMap.getOrVoid VertexID(n) let lbl = kMap.getOrVoid VertexID(n)
if lbl.isValid: if lbl.isValid:
let vid = pAmk.getOrVoid lbl let vids = pAmk.getOrVoid lbl
if not vid.isValid: if VertexID(n) notin vids or 1 < vids.len:
s = "(" & s & "," & lbl.ppLabel(db) & "" s = "(" & s & "," & lbl.key.ppKey(db,lbl.root)
elif vid != VertexID(n): elif lbl.key.len < 32:
s = "(" & s & "," & lbl.ppLabel(db) & "," & vid.ppVid s &= "[#" & $lbl.key.len & "]"
let count = dups.getOrDefault(VertexID(n), 0)
if 0 < count:
if s[0] != '(':
s &= "(" & s
s &= ",*" & $count
else: else:
s &= "£ø" s &= "£ø"
if s[0] == '(': if s[0] == '(':
@ -281,7 +294,6 @@ proc ppXMap*(
result = "{" result = "{"
# Extra reverse lookups # Extra reverse lookups
let revKeys = revOnly.keys.toSeq.mapIt(it.uint64).sorted.mapIt(it.VertexID)
if 0 < revKeys.len: if 0 < revKeys.len:
proc ppRevlabel(vid: VertexID): string = proc ppRevlabel(vid: VertexID): string =
"(ø," & revOnly.getOrVoid(vid).ppLabel(db) & ")" "(ø," & revOnly.getOrVoid(vid).ppLabel(db) & ")"
@ -311,9 +323,9 @@ proc ppXMap*(
for vid in kMap.sortedKeys: for vid in kMap.sortedKeys:
let lbl = kMap.getOrVoid vid let lbl = kMap.getOrVoid vid
if lbl.isValid: if lbl.isValid:
cache.add (vid.uint64, lbl.vidCode(db), 0 < dups.getOrDefault(vid, 0)) cache.add (vid.uint64, lbl.vidCode(db), vid in dups)
let lblVid = pAmk.getOrDefault(lbl, VertexID(0)) let vids = pAmk.getOrVoid lbl
if lblVid != VertexID(0) and lblVid != vid: if (0 < vids.len and vid notin vids) or lbl.key.len < 32:
cache[^1][2] = true cache[^1][2] = true
else: else:
cache.add (vid.uint64, 0u64, true) cache.add (vid.uint64, 0u64, true)
@ -347,7 +359,12 @@ proc ppXMap*(
else: else:
result &= "}" result &= "}"
proc ppFilter(fl: FilterRef; db: AristoDbRef; indent: int): string = proc ppFilter(
fl: FilterRef;
db: AristoDbRef;
root: VertexID;
indent: int;
): string =
## Walk over filter tables ## Walk over filter tables
let let
pfx = indent.toPfx pfx = indent.toPfx
@ -358,8 +375,8 @@ proc ppFilter(fl: FilterRef; db: AristoDbRef; indent: int): string =
result &= " n/a" result &= " n/a"
return return
result &= pfx & "fid=" & fl.fid.ppFid result &= pfx & "fid=" & fl.fid.ppFid
result &= pfx & "src=" & fl.src.ppKey result &= pfx & "src=" & fl.src.to(HashKey).ppKey(db,root)
result &= pfx & "trg=" & fl.trg.ppKey result &= pfx & "trg=" & fl.trg.to(HashKey).ppKey(db,root)
result &= pfx & "vGen" & pfx1 & "[" & result &= pfx & "vGen" & pfx1 & "[" &
fl.vGen.mapIt(it.ppVid).join(",") & "]" fl.vGen.mapIt(it.ppVid).join(",") & "]"
result &= pfx & "sTab" & pfx1 & "{" result &= pfx & "sTab" & pfx1 & "{"
@ -371,10 +388,10 @@ proc ppFilter(fl: FilterRef; db: AristoDbRef; indent: int): string =
for n,vid in fl.kMap.sortedKeys: for n,vid in fl.kMap.sortedKeys:
let key = fl.kMap.getOrVoid vid let key = fl.kMap.getOrVoid vid
if 0 < n: result &= pfx2 if 0 < n: result &= pfx2
result &= $(1+n) & "(" & vid.ppVid & "," & key.ppKey & ")" result &= $(1+n) & "(" & vid.ppVid & "," & key.ppKey(db,root) & ")"
result &= "}" result &= "}"
proc ppBe[T](be: T; db: AristoDbRef; indent: int): string = proc ppBe[T](be: T; db: AristoDbRef; root: VertexID; indent: int): string =
## Walk over backend tables ## Walk over backend tables
let let
pfx = indent.toPfx pfx = indent.toPfx
@ -387,7 +404,7 @@ proc ppBe[T](be: T; db: AristoDbRef; indent: int): string =
$(1+it[0]) & "(" & it[1].ppVid & "," & it[2].ppVtx(db,it[1]) & ")" $(1+it[0]) & "(" & it[1].ppVid & "," & it[2].ppVtx(db,it[1]) & ")"
).join(pfx2) & "}" ).join(pfx2) & "}"
result &= pfx & "kMap" & pfx1 & "{" & be.walkKey.toSeq.mapIt( result &= pfx & "kMap" & pfx1 & "{" & be.walkKey.toSeq.mapIt(
$(1+it[0]) & "(" & it[1].ppVid & "," & it[2].ppKey & ")" $(1+it[0]) & "(" & it[1].ppVid & "," & it[2].ppKey(db,root) & ")"
).join(pfx2) & "}" ).join(pfx2) & "}"
proc ppLayer( proc ppLayer(
@ -430,12 +447,12 @@ proc ppLayer(
let let
tLen = layer.sTab.len tLen = layer.sTab.len
info = "sTab(" & $tLen & ")" info = "sTab(" & $tLen & ")"
result &= info.doPrefix(0 < tLen) & layer.sTab.ppSTab(db,indent+1) result &= info.doPrefix(0 < tLen) & layer.sTab.ppSTab(db,indent+2)
if lTabOk: if lTabOk:
let let
tlen = layer.lTab.len tlen = layer.lTab.len
info = "lTab(" & $tLen & ")" info = "lTab(" & $tLen & ")"
result &= info.doPrefix(0 < tLen) & layer.lTab.ppLTab(indent+1) result &= info.doPrefix(0 < tLen) & layer.lTab.ppLTab(indent+2)
if kMapOk: if kMapOk:
let let
tLen = layer.kMap.len tLen = layer.kMap.len
@ -443,7 +460,7 @@ proc ppLayer(
lInf = if tLen == uLen: $tLen else: $tLen & "," & $ulen lInf = if tLen == uLen: $tLen else: $tLen & "," & $ulen
info = "kMap(" & lInf & ")" info = "kMap(" & lInf & ")"
result &= info.doPrefix(0 < tLen + uLen) result &= info.doPrefix(0 < tLen + uLen)
result &= db.ppXMap(layer.kMap, layer.pAmk,indent+1) result &= db.ppXMap(layer.kMap, layer.pAmk, indent+2)
if pPrfOk: if pPrfOk:
let let
tLen = layer.pPrf.len tLen = layer.pPrf.len
@ -458,8 +475,14 @@ proc ppLayer(
# Public functions # Public functions
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc pp*(key: HashKey): string = proc pp*(w: Hash256): string =
key.ppKey w.data.toHex.squeeze(hex=true,ignLen=true)
proc pp*(w: HashKey; sig: MerkleSignRef): string =
w.ppKey(sig.db, sig.root)
proc pp*(w: HashKey; db = AristoDbRef(); root = VertexID(1)): string =
w.ppKey(db, root)
proc pp*(lbl: HashLabel, db = AristoDbRef()): string = proc pp*(lbl: HashLabel, db = AristoDbRef()): string =
lbl.ppLabel(db) lbl.ppLabel(db)
@ -506,23 +529,22 @@ proc pp*(nd: NodeRef; root: VertexID; db: AristoDbRef): string =
result &= $nd.lPfx.ppPathPfx & "," & nd.lData.pp(db) result &= $nd.lPfx.ppPathPfx & "," & nd.lData.pp(db)
of Extension: of Extension:
let lbl = HashLabel(root: root, key: nd.key[0])
result &= $nd.ePfx.ppPathPfx & "," & nd.eVid.ppVid & "," result &= $nd.ePfx.ppPathPfx & "," & nd.eVid.ppVid & ","
result &= lbl.ppLabel(db) & db.labelVidUpdate(lbl, nd.eVid) result &= nd.key[0].ppKey(db,root)
result &= db.lidVidUpdate(root, nd.key[0], nd.eVid)
of Branch: of Branch:
result &= "[" result &= "["
for n in 0..15: for n in 0..15:
if nd.bVid[n].isValid or nd.key[n].isValid: if nd.bVid[n].isValid or nd.key[n].isValid:
result &= nd.bVid[n].ppVid result &= nd.bVid[n].ppVid
let lbl = HashLabel(root: root, key: nd.key[n]) result &= db.lidVidUpdate(root, nd.key[n], nd.bVid[n]) & ","
result &= db.labelVidUpdate(lbl, nd.bVid[n]) & ","
result[^1] = ']' result[^1] = ']'
result &= ",[" result &= ",["
for n in 0..15: for n in 0..15:
if nd.bVid[n].isValid or nd.key[n].isValid: if nd.bVid[n].isValid or nd.key[n].isValid:
result &= HashLabel(root: root, key: nd.key[n]).ppLabel(db) result &= nd.key[n].ppKey(db,root)
result &= "," result &= ","
result[^1] = ']' result[^1] = ']'
result &= ")" result &= ")"
@ -550,7 +572,7 @@ proc pp*(leg: Leg; db = AristoDbRef()): string =
let lbl = db.top.kMap.getOrVoid leg.wp.vid let lbl = db.top.kMap.getOrVoid leg.wp.vid
if not lbl.isValid: if not lbl.isValid:
result &= "ø" result &= "ø"
elif leg.wp.vid != db.top.pAmk.getOrVoid lbl: elif leg.wp.vid notin db.top.pAmk.getOrVoid lbl:
result &= lbl.ppLabel(db) result &= lbl.ppLabel(db)
result &= "," result &= ","
if leg.backend: if leg.backend:
@ -592,7 +614,7 @@ proc pp*(pAmk: Table[Hashlabel,VertexID]; indent = 4): string =
proc pp*(kMap: Table[VertexID,Hashlabel]; db: AristoDbRef; indent = 4): string = proc pp*(kMap: Table[VertexID,Hashlabel]; db: AristoDbRef; indent = 4): string =
db.ppXMap(kMap, db.top.pAmk, indent) db.ppXMap(kMap, db.top.pAmk, indent)
proc pp*(pAmk: Table[Hashlabel,VertexID]; db: AristoDbRef; indent = 4): string = proc pp*(pAmk: VidsByLabel; db: AristoDbRef; indent = 4): string =
db.ppXMap(db.top.kMap, pAmk, indent) db.ppXMap(db.top.kMap, pAmk, indent)
# --------------------- # ---------------------
@ -645,34 +667,43 @@ proc pp*(
proc pp*( proc pp*(
filter: FilterRef; filter: FilterRef;
db = AristoDbRef(); db = AristoDbRef();
root = VertexID(1);
indent = 4; indent = 4;
): string = ): string =
filter.ppFilter(db, indent) filter.ppFilter(db, root, indent)
proc pp*( proc pp*(
be: BackendRef; be: BackendRef;
db: AristoDbRef; db: AristoDbRef;
root = VertexID(1);
indent = 4; indent = 4;
): string = ): string =
result = db.roFilter.ppFilter(db, indent+1) & indent.toPfx result = db.roFilter.ppFilter(db, root, indent+1) & indent.toPfx
case be.kind: case be.kind:
of BackendMemory: of BackendMemory:
result &= be.MemBackendRef.ppBe(db, indent) result &= be.MemBackendRef.ppBe(db, root, indent)
of BackendRocksDB: of BackendRocksDB:
result &= be.RdbBackendRef.ppBe(db, indent) result &= be.RdbBackendRef.ppBe(db, root, indent)
of BackendVoid: of BackendVoid:
result &= "<NoBackend>" result &= "<NoBackend>"
proc pp*( proc pp*(
db: AristoDbRef; db: AristoDbRef;
backendOk = false; backendOk = false;
root = VertexID(1);
indent = 4; indent = 4;
): string = ): string =
result = db.top.pp(db, indent=indent) & indent.toPfx result = db.top.pp(db, indent=indent) & indent.toPfx
if backendOk: if backendOk:
result &= db.backend.pp(db) result &= db.backend.pp(db)
else: else:
result &= db.roFilter.ppFilter(db, indent+1) result &= db.roFilter.ppFilter(db, root, indent+1)
proc pp*(sdb: MerkleSignRef; indent = 4): string =
"count=" & $sdb.count &
" root=" & sdb.root.pp &
" error=" & $sdb.error &
"\n db\n " & sdb.db.pp(root=sdb.root, indent=indent+1)
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# End # End

View File

@ -299,7 +299,10 @@ proc deleteImpl(
if 1 < hike.legs.len: if 1 < hike.legs.len:
# Get current `Branch` vertex `br` # Get current `Branch` vertex `br`
let br = hike.legs[^2].wp let br = block:
var wp = hike.legs[^2].wp
wp.vtx = wp.vtx.dup # make sure that layers are not implicitly modified
wp
if br.vtx.vType != Branch: if br.vtx.vType != Branch:
return err((br.vid,DelBranchExpexted)) return err((br.vid,DelBranchExpexted))
@ -376,7 +379,7 @@ proc delete*(
db: AristoDbRef; db: AristoDbRef;
root: VertexID; root: VertexID;
path: openArray[byte]; path: openArray[byte];
): Result[void,(VertexID,AristoError)] = ): Result[void,(VertexID,AristoError)] =
## Variant of `fetchPayload()` ## Variant of `fetchPayload()`
## ##
db.delete(? path.initNibbleRange.hikeUp(root, db).mapErr toVae) db.delete(? path.initNibbleRange.hikeUp(root, db).mapErr toVae)

View File

@ -46,6 +46,14 @@ type
txUid*: uint ## Unique ID among transactions txUid*: uint ## Unique ID among transactions
level*: int ## Stack index for this transaction level*: int ## Stack index for this transaction
MerkleSignRef* = ref object
## Simple Merkle signature calculator for key-value lists
root*: VertexID
db*: AristoDbRef
count*: uint
error*: AristoError
errKey*: Blob
DudesRef = ref object DudesRef = ref object
case rwOk: bool case rwOk: bool
of true: of true:
@ -67,7 +75,7 @@ type
dudes: DudesRef ## Related DB descriptors dudes: DudesRef ## Related DB descriptors
# Debugging data below, might go away in future # Debugging data below, might go away in future
xMap*: Table[HashLabel,VertexID] ## For pretty printing, extends `pAmk` xMap*: VidsByLabel ## For pretty printing, extends `pAmk`
AristoDbAction* = proc(db: AristoDbRef) {.gcsafe, raises: [].} AristoDbAction* = proc(db: AristoDbRef) {.gcsafe, raises: [].}
## Generic call back function/closure. ## Generic call back function/closure.
@ -82,12 +90,18 @@ func getOrVoid*[W](tab: Table[W,VertexRef]; w: W): VertexRef =
func getOrVoid*[W](tab: Table[W,HashLabel]; w: W): HashLabel = func getOrVoid*[W](tab: Table[W,HashLabel]; w: W): HashLabel =
tab.getOrDefault(w, VOID_HASH_LABEL) tab.getOrDefault(w, VOID_HASH_LABEL)
func getOrVoid*[W](tab: Table[W,NodeRef]; w: W): NodeRef =
tab.getOrDefault(w, NodeRef(nil))
func getOrVoid*[W](tab: Table[W,HashKey]; w: W): HashKey = func getOrVoid*[W](tab: Table[W,HashKey]; w: W): HashKey =
tab.getOrDefault(w, VOID_HASH_KEY) tab.getOrDefault(w, VOID_HASH_KEY)
func getOrVoid*[W](tab: Table[W,VertexID]; w: W): VertexID = func getOrVoid*[W](tab: Table[W,VertexID]; w: W): VertexID =
tab.getOrDefault(w, VertexID(0)) tab.getOrDefault(w, VertexID(0))
func getOrVoid*[W](tab: Table[W,HashSet[VertexID]]; w: W): HashSet[VertexID] =
tab.getOrDefault(w, EmptyVidSet)
# -------- # --------
func isValid*(vtx: VertexRef): bool = func isValid*(vtx: VertexRef): bool =
@ -102,15 +116,24 @@ func isValid*(pld: PayloadRef): bool =
func isValid*(filter: FilterRef): bool = func isValid*(filter: FilterRef): bool =
filter != FilterRef(nil) filter != FilterRef(nil)
func isValid*(key: HashKey): bool = func isValid*(root: Hash256): bool =
key != VOID_HASH_KEY root != EMPTY_ROOT_HASH
func isValid*(lbl: HashLabel): bool = func isValid*(key: HashKey): bool =
lbl != VOID_HASH_LABEL if key.len == 32:
key.to(Hash256).isValid
else:
0 < key.len
func isValid*(vid: VertexID): bool = func isValid*(vid: VertexID): bool =
vid != VertexID(0) vid != VertexID(0)
func isValid*(lbl: HashLabel): bool =
lbl.root.isValid and lbl.key.isValid
func isValid*(sqv: HashSet[VertexID]): bool =
sqv != EmptyVidSet
func isValid*(qid: QueueID): bool = func isValid*(qid: QueueID): bool =
qid != QueueID(0) qid != QueueID(0)
@ -126,18 +149,6 @@ func hash*(db: AristoDbRef): Hash =
## Table/KeyedQueue/HashSet mixin ## Table/KeyedQueue/HashSet mixin
cast[pointer](db).hash cast[pointer](db).hash
# Note that the below `init()` function cannot go into `desc_identifiers` as
# this would result in a circular import.
func init*(key: var HashKey; data: openArray[byte]): bool =
## Import argument `data` into `key` which must have length either `32`, or
## `0`. The latter case is equivalent to an all zero byte array of size `32`.
if data.len == 32:
(addr key.ByteArray32[0]).copyMem(unsafeAddr data[0], data.len)
return true
if data.len == 0:
key = VOID_HASH_KEY
return true
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Public functions, `dude` related # Public functions, `dude` related
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------

View File

@ -16,12 +16,16 @@ type
# Rlp decoder, `read()` # Rlp decoder, `read()`
Rlp2Or17ListEntries Rlp2Or17ListEntries
RlpBlobExpected RlpBlobExpected
RlpBranchLinkExpected RlpBranchHashKeyExpected
RlpExtPathEncoding
RlpNonEmptyBlobExpected
RlpEmptyBlobExpected RlpEmptyBlobExpected
RlpRlpException RlpExtHashKeyExpected
RlpHashKeyExpected
RlpNonEmptyBlobExpected
RlpOtherException RlpOtherException
RlpRlpException
# Serialise decoder
SerCantResolveStorageRoot
# Data record transcoders, `deblobify()` and `blobify()` # Data record transcoders, `deblobify()` and `blobify()`
BlobifyNilFilter BlobifyNilFilter
@ -34,7 +38,8 @@ type
DeblobNilArgument DeblobNilArgument
DeblobUnknown DeblobUnknown
DeblobTooShort DeblobVtxTooShort
DeblobHashKeyExpected
DeblobBranchTooShort DeblobBranchTooShort
DeblobBranchSizeGarbled DeblobBranchSizeGarbled
DeblobBranchInxOutOfRange DeblobBranchInxOutOfRange
@ -90,13 +95,15 @@ type
MergeAssemblyFailed # Ooops, internal error MergeAssemblyFailed # Ooops, internal error
MergeHashKeyInvalid MergeHashKeyInvalid
MergeHashKeyCachedAlready
MergeHashKeyDiffersFromCached
MergeHashKeyRevLookUpGarbled
MergeRootVidInvalid MergeRootVidInvalid
MergeRootKeyInvalid MergeRootKeyInvalid
MergeRevVidMustHaveBeenCached MergeRevVidMustHaveBeenCached
MergeHashKeyCachedAlready
MergeHashKeyDiffersFromCached
MergeNodeVtxDiffersFromExisting MergeNodeVtxDiffersFromExisting
MergeRootKeyDiffersForVid MergeRootKeyDiffersForVid
MergeNodeVtxDuplicates
# Update `Merkle` hashes `hashify()` # Update `Merkle` hashes `hashify()`
HashifyCannotComplete HashifyCannotComplete
@ -128,15 +135,19 @@ type
CheckAnyVtxEmptyKeyMissing CheckAnyVtxEmptyKeyMissing
CheckAnyVtxEmptyKeyExpected CheckAnyVtxEmptyKeyExpected
CheckAnyVtxEmptyKeyMismatch CheckAnyVtxEmptyKeyMismatch
CheckAnyVtxBranchLinksMissing
CheckAnyVtxExtPfxMissing
CheckAnyVtxLockWithoutKey
CheckAnyRevVtxMissing CheckAnyRevVtxMissing
CheckAnyRevVtxDup CheckAnyRevVtxDup
CheckAnyRevCountMismatch CheckAnyRevCountMismatch
CheckAnyVtxLockWithoutKey
# Backend structural check `checkBE()` # Backend structural check `checkBE()`
CheckBeVtxInvalid CheckBeVtxInvalid
CheckBeKeyInvalid
CheckBeVtxMissing CheckBeVtxMissing
CheckBeVtxBranchLinksMissing
CheckBeVtxExtPfxMissing
CheckBeKeyInvalid
CheckBeKeyMissing CheckBeKeyMissing
CheckBeKeyCantCompile CheckBeKeyCantCompile
CheckBeKeyMismatch CheckBeKeyMismatch
@ -229,6 +240,7 @@ type
RdbBeAddSstWriter RdbBeAddSstWriter
RdbBeFinishSstWriter RdbBeFinishSstWriter
RdbBeIngestSstWriter RdbBeIngestSstWriter
RdbHashKeyExpected
# Transaction wrappers # Transaction wrappers
TxArgStaleTx TxArgStaleTx

View File

@ -17,12 +17,10 @@
import import
std/[sequtils, strutils, hashes], std/[sequtils, strutils, hashes],
eth/[common, trie/nibbles], eth/[common, trie/nibbles],
results,
stint stint
type type
ByteArray32* = array[32,byte]
## Used for 32 byte hash components repurposed as Merkle hash labels.
QueueID* = distinct uint64 QueueID* = distinct uint64
## Identifier used to tag filter logs stored on the backend. ## Identifier used to tag filter logs stored on the backend.
@ -37,10 +35,27 @@ type
## backend of the database, there is no other reference to the node than ## backend of the database, there is no other reference to the node than
## the very same `VertexID`. ## the very same `VertexID`.
HashKey* = distinct ByteArray32 HashKey* = object
## Dedicated `Hash256` object variant that is used for labelling the ## Ethereum MPTs use Keccak hashes as node links if the size of an RLP
## vertices of the `Patricia Trie` in order to make it a ## encoded node is at least 32 bytes. Otherwise, the RLP encoded
## `Merkle Patricia Tree`. ## node value is used as a pseudo node link (rather than a hash.) Such a
## node is not stored on the key-value database. Rather, the RLP encoded
## node value is stored in the parent node in place of a node link. Only
## the top level node is always referred to by its hash (the root
## hash.)
##
## This compaction feature needed an abstraction of the `HashKey` object
## which is either a `Hash256` or a `Blob` of length at most 31 bytes.
## This leaves two ways of representing an empty/void `HashKey` type.
## It may be available as an empty `Blob` of zero length, or the
## `Hash256` type of the Keccak hash of an empty `Blob` (see constant
## `EMPTY_ROOT_HASH`.)
##
case isHash: bool
of true:
key: Hash256 ## Merkle hash tacked to a vertex
else:
blob: Blob ## Optionally encoded small node data
PathID* = object PathID* = object
## Path into the `Patricia Trie`. This is a chain of maximal 64 nibbles ## Path into the `Patricia Trie`. This is a chain of maximal 64 nibbles
@ -79,11 +94,23 @@ type
## `Aristo Trie`. They are used temporarily and in caches or backlog ## `Aristo Trie`. They are used temporarily and in caches or backlog
## tables. ## tables.
root*: VertexID ## Root ID for the sub-trie. root*: VertexID ## Root ID for the sub-trie.
key*: HashKey ## Merkle hash tacked to a vertex. key*: HashKey ## Merkle hash or encoded small node data
static: # ------------------------------------------------------------------------------
# Not that there is no doubt about this ... # Private helpers
doAssert HashKey.default.ByteArray32.initNibbleRange.len == 64 # ------------------------------------------------------------------------------
func to(lid: HashKey; T: type PathID): T =
## Helper to borrow certain properties from `PathID`
if lid.isHash:
PathID(pfx: UInt256.fromBytesBE lid.key.data, length: 64)
elif 0 < lid.blob.len:
doAssert lid.blob.len < 32
var a32: array[32,byte]
(addr a32[0]).copyMem(unsafeAddr lid.blob[0], lid.blob.len)
PathID(pfx: UInt256.fromBytesBE a32, length: 2 * lid.blob.len.uint8)
else:
PathID()
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Public helpers: `VertexID` scalar data model # Public helpers: `VertexID` scalar data model
@ -184,6 +211,52 @@ func `==`*(a, b: PathID): bool =
## (see `normal()`.) ## (see `normal()`.)
a.pfx == b.pfx and a.length == b.length a.pfx == b.pfx and a.length == b.length
func cmp*(a, b: PathID): int =
if a < b: -1 elif b < a: 1 else: 0
# ------------------------------------------------------------------------------
# Public helpers: `HashKey` ordered scalar data model
# ------------------------------------------------------------------------------
func len*(lid: HashKey): int =
if lid.isHash: 32 else: lid.blob.len
func fromBytes*(T: type HashKey; data: openArray[byte]): Result[T,void] =
## Write argument `data` of length 0 or between 2 and 32 bytes as a `HashKey`.
##
## A function argument `data` of length 32 is used as-is.
##
## For a function argument `data` of length between 2 and 31, the first
## byte must be the start of an RLP encoded list, i.e. `0xc0 + len`
## where `len` is one less than the `data` length.
##
if data.len == 32:
var lid: T
lid.isHash = true
(addr lid.key.data[0]).copyMem(unsafeAddr data[0], data.len)
return ok lid
if data.len == 0:
return ok HashKey()
if 1 < data.len and data.len < 32 and data[0].int == 0xbf + data.len:
return ok T(isHash: false, blob: @data)
err()
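# Illustration of the `0xc0 + len` rule above (not part of this diff):
# a 3 byte argument must start with `0xc2`, i.e. `0xbf + 3`, so
#   HashKey.fromBytes(@[0xc2.byte, 0x80, 0x80]).isOk  # accepted
#   HashKey.fromBytes(@[0x00.byte, 0x80, 0x80]).isErr # rejected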
func `<`*(a, b: HashKey): bool =
## Slow, but useful for debug sorting
a.to(PathID) < b.to(PathID)
func `==`*(a, b: HashKey): bool =
if a.isHash != b.isHash:
false
elif a.isHash:
a.key == b.key
else:
a.blob == b.blob
func cmp*(a, b: HashKey): int =
## Slow, but useful for debug sorting
if a < b: -1 elif b < a: 1 else: 0
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Public helpers: `LeafTie` ordered scalar data model # Public helpers: `LeafTie` ordered scalar data model
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
@ -223,35 +296,42 @@ func cmp*(a, b: LeafTie): int =
# Public helpers: Reversible conversions between `PathID`, `HashKey`, etc. # Public helpers: Reversible conversions between `PathID`, `HashKey`, etc.
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc to*(key: HashKey; T: type UInt256): T =
T.fromBytesBE key.ByteArray32
func to*(key: HashKey; T: type Hash256): T =
T(data: ByteArray32(key))
func to*(key: HashKey; T: type PathID): T =
## Not necessarily reversible for shorter lengths `PathID` values
T(pfx: UInt256.fromBytesBE key.ByteArray32, length: 64)
func to*(hash: Hash256; T: type HashKey): T =
hash.data.T
func to*(key: HashKey; T: type Blob): T = func to*(key: HashKey; T: type Blob): T =
## Representation of a `HashKey` as `Blob` (preserving full information) ## Rewrite `HashKey` argument as `Blob` type of length between 0 and 32. A
key.ByteArray32.toSeq ## blob of length 32 is taken as a representation of a `HashKey` type while
## smaller blobs are expected to represent an RLP encoded small node.
if key.isHash:
@(key.key.data)
else:
key.blob
func to*(key: HashKey; T: type NibblesSeq): T = func `@`*(lid: HashKey): Blob =
## Representation of a `HashKey` as `NibbleSeq` (preserving full information) ## Variant of `to(Blob)`
key.ByteArray32.initNibbleRange() lid.to(Blob)
func to*(pid: PathID; T: type NibblesSeq): T = func to*(pid: PathID; T: type NibblesSeq): T =
## Representation of a `HashKey` as `NibbleSeq` (preserving full information) ## Representation of a `PathID` as `NibbleSeq` (preserving full information)
let nibbles = pid.pfx.UInt256.toBytesBE.toSeq.initNibbleRange() let nibbles = pid.pfx.UInt256.toBytesBE.toSeq.initNibbleRange()
if pid.length < 64: if pid.length < 64:
nibbles.slice(0, pid.length.int) nibbles.slice(0, pid.length.int)
else: else:
nibbles nibbles
func to*(lid: HashKey; T: type Hash256): T =
## Returns the `Hash256` key if available, otherwise the Keccak hash of
## the `Blob` version.
if lid.isHash:
lid.key
elif 0 < lid.blob.len:
lid.blob.keccakHash
else:
EMPTY_ROOT_HASH
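So the `Hash256` view of an embedded node is computed on demand; a minimal sketch using the definitions above:

    let short = @[0xc3'u8, 1, 2, 3].digestTo(HashKey)    # embedded small node
    doAssert short.to(Hash256) == short.blob.keccakHash  # hashed on demand
    doAssert HashKey().to(Hash256) == EMPTY_ROOT_HASH    # void key fallback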
func to*(key: Hash256; T: type HashKey): T =
## This is an efficient version of `HashKey.fromBytes(key.data).value`, not
## to be confused with `digestTo(HashKey)`.
T(isHash: true, key: key)
func to*(n: SomeUnsignedInt|UInt256; T: type PathID): T = func to*(n: SomeUnsignedInt|UInt256; T: type PathID): T =
## Representation of a scalar as `PathID` (preserving full information) ## Representation of a scalar as `PathID` (preserving full information)
T(pfx: n.u256, length: 64) T(pfx: n.u256, length: 64)
@ -261,8 +341,13 @@ func to*(n: SomeUnsignedInt|UInt256; T: type PathID): T =
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
func digestTo*(data: openArray[byte]; T: type HashKey): T = func digestTo*(data: openArray[byte]; T: type HashKey): T =
## Keccak hash of a `Blob` like argument, represented as a `HashKey` ## For argument `data` with length smaller than 32, import them as-is into
keccakHash(data).data.T ## the result. Otherwise import the Keccak hash of the argument `data`.
if data.len < 32:
result.blob = @data
else:
result.isHash = true
result.key = data.keccakHash
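This mirrors the MPT rule restated in the commit message: an RLP encoding shorter than 32 bytes is kept verbatim as a pseudo link, anything else is referred to by its Keccak hash. Sketch:

    doAssert not @[0xc3'u8, 1, 2, 3].digestTo(HashKey).isHash  # < 32 bytes: embedded
    doAssert newSeq[byte](64).digestTo(HashKey).isHash         # >= 32 bytes: hashed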
func normal*(a: PathID): PathID = func normal*(a: PathID): PathID =
## Normalise path ID representation ## Normalise path ID representation
@ -283,22 +368,28 @@ func hash*(a: PathID): Hash =
h = h !& a.length.hash h = h !& a.length.hash
!$h !$h
func hash*(a: HashKey): Hash {.borrow.} func hash*(a: HashKey): Hash =
## Table/KeyedQueue mixin
var h: Hash = 0
if a.isHash:
h = h !& a.key.hash
else:
h = h !& a.blob.hash
!$h
func `==`*(a, b: HashKey): bool {.borrow.} func hash*(lbl: HashLabel): Hash =
## Table/KeyedQueue/HashSet mixin
func read*(rlp: var Rlp; T: type HashKey;): T {.gcsafe, raises: [RlpError].} = var h: Hash = 0
rlp.read(Hash256).to(T) h = h !& lbl.root.hash
h = h !& lbl.key.hash
func append*(writer: var RlpWriter, val: HashKey) = !$h
writer.append(val.to(Hash256))
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Miscellaneous helpers # Miscellaneous helpers
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
func `$`*(key: HashKey): string = func `$`*(key: Hash256): string =
let w = UInt256.fromBytesBE key.ByteArray32 let w = UInt256.fromBytesBE key.data
if w == high(UInt256): if w == high(UInt256):
"2^256-1" "2^256-1"
elif w == 0.u256: elif w == 0.u256:
@ -316,8 +407,11 @@ func `$`*(key: HashKey): string =
func `$`*(a: PathID): string = func `$`*(a: PathID): string =
if a.pfx != 0: if a.pfx != 0:
result = ($a.pfx.toHex).strip( var dgts = $a.pfx.toHex
leading=true, trailing=false, chars={'0'}).toLowerAscii if a.length < 64:
dgts = dgts[0 ..< a.length]
result = dgts.strip(
leading=true, trailing=false, chars={'0'})
elif a.length != 0: elif a.length != 0:
result = "0" result = "0"
if a.length < 64: if a.length < 64:
@ -326,7 +420,7 @@ func `$`*(a: PathID): string =
func `$`*(a: LeafTie): string = func `$`*(a: LeafTie): string =
if a.root != 0: if a.root != 0:
result = ($a.root.uint64.toHex).strip( result = ($a.root.uint64.toHex).strip(
leading=true, trailing=false, chars={'0'}).toLowerAscii leading=true, trailing=false, chars={'0'})
else: else:
result = "0" result = "0"
result &= ":" & $a.path result &= ":" & $a.path
@ -15,7 +15,7 @@
{.push raises: [].} {.push raises: [].}
import import
std/[sets, tables], std/[hashes, sets, tables],
eth/[common, trie/nibbles], eth/[common, trie/nibbles],
"."/[desc_error, desc_identifiers] "."/[desc_error, desc_identifiers]
@ -75,7 +75,7 @@ type
NodeRef* = ref object of VertexRef NodeRef* = ref object of VertexRef
## Combined record for a *traditional* `Merkle Patricia Tree` node merged ## Combined record for a *traditional* `Merkle Patricia Tree` node merged
## with a structural `VertexRef` type object. ## with a structural `VertexRef` type object.
error*: AristoError ## Can be used for error signalling error*: AristoError ## Used for error signalling in RLP decoder
key*: array[16,HashKey] ## Merkle hash/es for vertices key*: array[16,HashKey] ## Merkle hash/es for vertices
# ---------------------- # ----------------------
@ -83,19 +83,22 @@ type
FilterRef* = ref object FilterRef* = ref object
## Delta layer with expanded sequences for quick access ## Delta layer with expanded sequences for quick access
fid*: FilterID ## Filter identifier fid*: FilterID ## Filter identifier
src*: HashKey ## Applicable to this state root src*: Hash256 ## Applicable to this state root
trg*: HashKey ## Resulting state root (i.e. `kMap[1]`) trg*: Hash256 ## Resulting state root (i.e. `kMap[1]`)
sTab*: Table[VertexID,VertexRef] ## Filter structural vertex table sTab*: Table[VertexID,VertexRef] ## Filter structural vertex table
kMap*: Table[VertexID,HashKey] ## Filter Merkle hash key mapping kMap*: Table[VertexID,HashKey] ## Filter Merkle hash key mapping
vGen*: seq[VertexID] ## Filter unique vertex ID generator vGen*: seq[VertexID] ## Filter unique vertex ID generator
VidsByLabel* = Table[HashLabel,HashSet[VertexID]]
## Reverse lookup searching `VertexID` by the hash key/label.
LayerRef* = ref object LayerRef* = ref object
## Hexary trie database layer structures. Any layer holds the full ## Hexary trie database layer structures. Any layer holds the full
## change relative to the backend. ## change relative to the backend.
sTab*: Table[VertexID,VertexRef] ## Structural vertex table sTab*: Table[VertexID,VertexRef] ## Structural vertex table
lTab*: Table[LeafTie,VertexID] ## Direct access, path to leaf vertex lTab*: Table[LeafTie,VertexID] ## Direct access, path to leaf vertex
kMap*: Table[VertexID,HashLabel] ## Merkle hash key mapping kMap*: Table[VertexID,HashLabel] ## Merkle hash key mapping
pAmk*: Table[HashLabel,VertexID] ## Reverse `kMap` entries, hash key lookup pAmk*: VidsByLabel ## Reverse `kMap` entries, hash key lookup
pPrf*: HashSet[VertexID] ## Locked vertices (proof nodes) pPrf*: HashSet[VertexID] ## Locked vertices (proof nodes)
vGen*: seq[VertexID] ## Unique vertex ID generator vGen*: seq[VertexID] ## Unique vertex ID generator
txUid*: uint ## Transaction identifier if positive txUid*: uint ## Transaction identifier if positive
@ -135,10 +138,35 @@ const
func max(a, b, c: int): int = func max(a, b, c: int): int =
max(max(a,b),c) max(max(a,b),c)
# ------------------------------------------------------------------------------
# Public helpers: `Table[HashLabel,seq[VertexID]]`
# ------------------------------------------------------------------------------
proc append*(pAmk: var VidsByLabel; lbl: HashLabel; vid: VertexID) =
pAmk.withValue(lbl,value):
value[].incl vid
do: # else if not found
pAmk[lbl] = @[vid].toHashSet
proc delete*(pAmk: var VidsByLabel; lbl: HashLabel; vid: VertexID) =
var deleteItem = false
pAmk.withValue(lbl,value):
value[].excl vid
if value[].len == 0:
deleteItem = true
if deleteItem:
pAmk.del lbl
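Usage sketch for this one-to-many reverse lookup (the label below is made up for illustration):

    var pAmk: VidsByLabel
    let lbl = HashLabel(root: VertexID(1), key: VOID_HASH_KEY)
    pAmk.append(lbl, VertexID 2)
    pAmk.append(lbl, VertexID 3)
    doAssert pAmk.getOrVoid(lbl).len == 2
    pAmk.delete(lbl, VertexID 2)
    pAmk.delete(lbl, VertexID 3)   # removing the last entry drops the table key
    doAssert lbl notin pAmk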
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Public helpers: `NodeRef` and `PayloadRef` # Public helpers: `NodeRef` and `PayloadRef`
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
func hash*(node: NodeRef): Hash =
## Table/KeyedQueue/HashSet mixin
cast[pointer](node).hash
# ---------------
proc `==`*(a, b: PayloadRef): bool = proc `==`*(a, b: PayloadRef): bool =
## Beware, potential deep comparison ## Beware, potential deep comparison
if a.isNil: if a.isNil:
@ -14,6 +14,7 @@
import import
std/[sequtils, tables], std/[sequtils, tables],
eth/common,
results, results,
"."/[aristo_desc, aristo_get, aristo_vid], "."/[aristo_desc, aristo_get, aristo_vid],
./aristo_desc/desc_backend, ./aristo_desc/desc_backend,
@ -89,16 +90,16 @@ proc merge*(
## Merge the argument `filter` into the read-only filter layer. Note that ## Merge the argument `filter` into the read-only filter layer. Note that
## this function has no control of the filter source. Having merged the ## this function has no control of the filter source. Having merged the
## argument `filter`, all the `top` and `stack` layers should be cleared. ## argument `filter`, all the `top` and `stack` layers should be cleared.
let ubeRootKey = block: let ubeRoot = block:
let rc = db.getKeyUBE VertexID(1) let rc = db.getKeyUBE VertexID(1)
if rc.isOk: if rc.isOk:
rc.value rc.value.to(Hash256)
elif rc.error == GetKeyNotFound: elif rc.error == GetKeyNotFound:
VOID_HASH_KEY EMPTY_ROOT_HASH
else: else:
return err((VertexID(1),rc.error)) return err((VertexID(1),rc.error))
db.roFilter = ? db.merge(filter, db.roFilter, ubeRootKey) db.roFilter = ? db.merge(filter, db.roFilter, ubeRoot)
ok() ok()
@ -9,7 +9,8 @@
# except according to those terms. # except according to those terms.
import import
std/tables, std/[sets, tables],
eth/common,
results, results,
".."/[aristo_desc, aristo_desc/desc_backend, aristo_get], ".."/[aristo_desc, aristo_desc/desc_backend, aristo_get],
./filter_scheduler ./filter_scheduler
@ -17,8 +18,8 @@ import
type type
StateRootPair* = object StateRootPair* = object
## Helper structure for analysing state roots. ## Helper structure for analysing state roots.
be*: HashKey ## Backend state root be*: Hash256 ## Backend state root
fg*: HashKey ## Layer or filter implied state root fg*: Hash256 ## Layer or filter implied state root
FilterIndexPair* = object FilterIndexPair* = object
## Helper structure for fetching filters from cascaded fifo ## Helper structure for fetching filters from cascaded fifo
@ -39,7 +40,7 @@ proc getLayerStateRoots*(
## ##
var spr: StateRootPair var spr: StateRootPair
spr.be = block: let sprBeKey = block:
let rc = db.getKeyBE VertexID(1) let rc = db.getKeyBE VertexID(1)
if rc.isOk: if rc.isOk:
rc.value rc.value
@ -47,15 +48,20 @@ proc getLayerStateRoots*(
VOID_HASH_KEY VOID_HASH_KEY
else: else:
return err(rc.error) return err(rc.error)
spr.be = sprBeKey.to(Hash256)
block: spr.fg = block:
spr.fg = layer.kMap.getOrVoid(VertexID 1).key let lbl = layer.kMap.getOrVoid VertexID(1)
if spr.fg.isValid: if lbl.isValid:
return ok(spr) lbl.key.to(Hash256)
else:
EMPTY_ROOT_HASH
if spr.fg.isValid:
return ok(spr)
if chunkedMpt: if chunkedMpt:
let vid = layer.pAmk.getOrVoid HashLabel(root: VertexID(1), key: spr.be) let vids = layer.pAmk.getOrVoid HashLabel(root: VertexID(1), key: sprBeKey)
if vid == VertexID(1): if VertexID(1) in vids:
spr.fg = spr.be spr.fg = spr.be
return ok(spr) return ok(spr)
@ -10,6 +10,7 @@
import import
std/tables, std/tables,
eth/common,
results, results,
".."/[aristo_desc, aristo_get] ".."/[aristo_desc, aristo_get]
@ -21,7 +22,7 @@ proc merge*(
db: AristoDbRef; db: AristoDbRef;
upper: FilterRef; # Src filter, `nil` is ok upper: FilterRef; # Src filter, `nil` is ok
lower: FilterRef; # Trg filter, `nil` is ok lower: FilterRef; # Trg filter, `nil` is ok
beStateRoot: HashKey; # Merkle hash key beStateRoot: Hash256; # Merkle hash key
): Result[FilterRef,(VertexID,AristoError)] = ): Result[FilterRef,(VertexID,AristoError)] =
## Merge argument `upper` into the `lower` filter instance. ## Merge argument `upper` into the `lower` filter instance.
## ##
@ -88,7 +89,7 @@ proc merge*(
elif newFilter.kMap.getOrVoid(vid).isValid: elif newFilter.kMap.getOrVoid(vid).isValid:
let rc = db.getKeyUBE vid let rc = db.getKeyUBE vid
if rc.isOk: if rc.isOk:
newFilter.kMap[vid] = key # VOID_HASH_KEY newFilter.kMap[vid] = key
elif rc.error == GetKeyNotFound: elif rc.error == GetKeyNotFound:
newFilter.kMap.del vid newFilter.kMap.del vid
else: else:
@ -113,9 +114,6 @@ proc merge*(
## | (src1==trg0) --> newFilter --> trg2 ## | (src1==trg0) --> newFilter --> trg2
## (src1==trg0) --> lower --> trg1 | ## (src1==trg0) --> lower --> trg1 |
## | ## |
const
noisy = false
if upper.isNil or lower.isNil: if upper.isNil or lower.isNil:
return err((VertexID(0),FilNilFilterRejected)) return err((VertexID(0),FilNilFilterRejected))
@ -164,9 +164,9 @@ proc getKeyRc*(db: AristoDbRef; vid: VertexID): Result[HashKey,AristoError] =
if db.top.kMap.hasKey vid: if db.top.kMap.hasKey vid:
# If the key is to be deleted on the backend, a `VOID_HASH_LABEL` entry # If the key is to be deleted on the backend, a `VOID_HASH_LABEL` entry
# is kept on the local table in which case it is OK to return this value. # is kept on the local table in which case it is OK to return this value.
let key = db.top.kMap.getOrVoid(vid).key let lbl = db.top.kMap.getOrVoid vid
if key.isValid: if lbl.isValid:
return ok(key) return ok lbl.key
return err(GetKeyTempLocked) return err(GetKeyTempLocked)
db.getKeyBE vid db.getKeyBE vid
@ -174,10 +174,8 @@ proc getKey*(db: AristoDbRef; vid: VertexID): HashKey =
## Cascaded attempt to fetch a vertex from the top layer or the backend. ## Cascaded attempt to fetch a vertex from the top layer or the backend.
## The function returns `nil` on error or failure. ## The function returns `nil` on error or failure.
## ##
let rc = db.getKeyRc vid db.getKeyRc(vid).valueOr:
if rc.isOk: return VOID_HASH_KEY
return rc.value
VOID_HASH_KEY
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# End # End
@ -47,7 +47,7 @@ import
eth/common, eth/common,
results, results,
stew/interval_set, stew/interval_set,
"."/[aristo_desc, aristo_get, aristo_hike, aristo_transcode, aristo_utils, "."/[aristo_desc, aristo_get, aristo_hike, aristo_serialise, aristo_utils,
aristo_vid] aristo_vid]
type type
@ -144,6 +144,7 @@ proc updateHashKey(
# Proceed `vidAttach()`, below # Proceed `vidAttach()`, below
# Otherwise there is no Merkle hash, so create one with the `expected` key # and write it to the top level `pAmk[]` and `kMap[]` tables.
# and write it to the top level `pAmk[]` and `kMap[]` tables.
db.vidAttach(HashLabel(root: root, key: expected), vid) db.vidAttach(HashLabel(root: root, key: expected), vid)
ok() ok()
@ -166,11 +167,9 @@ proc leafToRootHasher(
continue continue
# Check against existing key, or store new key # Check against existing key, or store new key
let let key = rc.value.digestTo(HashKey)
key = rc.value.to(HashKey) db.updateHashKey(hike.root, wp.vid, key, bg).isOkOr:
rx = db.updateHashKey(hike.root, wp.vid, key, bg) return err((wp.vid,error))
if rx.isErr:
return err((wp.vid,rx.error))
ok -1 # all could be hashed ok -1 # all could be hashed
@ -197,7 +196,7 @@ proc deletedLeafHasher(
let rc = wp.vtx.toNode(db, stopEarly=false) let rc = wp.vtx.toNode(db, stopEarly=false)
if rc.isOk: if rc.isOk:
let let
expected = rc.value.to(HashKey) expected = rc.value.digestTo(HashKey)
key = db.getKey wp.vid key = db.getKey wp.vid
if key.isValid: if key.isValid:
if key != expected: if key != expected:
@ -301,11 +300,9 @@ proc resolveStateRoots(
let rc = fVal.vtx.toNode db let rc = fVal.vtx.toNode db
if rc.isOk: if rc.isOk:
# Update Merkle hash # Update Merkle hash
let let key = rc.value.digestTo(HashKey)
key = rc.value.to(HashKey) db.updateHashKey(fVal.w.root, fVid, key, fVal.w.onBe).isOkOr:
rx = db.updateHashKey(fVal.w.root, fVid, key, fVal.w.onBe) return err((fVid, error))
if rx.isErr:
return err((fVid, rx.error))
changes = true changes = true
else: else:
# Cannot complete with this vertex, so dig deeper and do it later # Cannot complete with this vertex, so dig deeper and do it later
@ -440,11 +437,9 @@ proc hashify*(
else: else:
# Update Merkle hash # Update Merkle hash
let let key = rc.value.digestTo(HashKey)
key = rc.value.to(HashKey) db.updateHashKey(val.root, vid, key, val.onBe).isOkOr:
rx = db.updateHashKey(val.root, vid, key, val.onBe) return err((vid,error))
if rx.isErr:
return err((vid,rx.error))
done.incl vid done.incl vid
@ -34,7 +34,7 @@ import
../aristo_constants, ../aristo_constants,
../aristo_desc, ../aristo_desc,
../aristo_desc/desc_backend, ../aristo_desc/desc_backend,
../aristo_transcode, ../aristo_blobify,
./init_common ./init_common
type type
@ -93,7 +93,7 @@ proc getVtxFn(db: MemBackendRef): GetVtxFn =
proc getKeyFn(db: MemBackendRef): GetKeyFn = proc getKeyFn(db: MemBackendRef): GetKeyFn =
result = result =
proc(vid: VertexID): Result[HashKey,AristoError] = proc(vid: VertexID): Result[HashKey,AristoError] =
let key = db.kMap.getOrDefault(vid, VOID_HASH_KEY) let key = db.kMap.getOrVoid vid
if key.isValid: if key.isValid:
return ok key return ok key
err(GetKeyNotFound) err(GetKeyNotFound)
@ -327,7 +327,7 @@ iterator walkKey*(
): tuple[n: int, vid: VertexID, key: HashKey] = ): tuple[n: int, vid: VertexID, key: HashKey] =
## Iteration over the Merkle hash sub-table. ## Iteration over the Merkle hash sub-table.
for n,vid in be.kMap.keys.toSeq.mapIt(it.uint64).sorted.mapIt(it.VertexID): for n,vid in be.kMap.keys.toSeq.mapIt(it.uint64).sorted.mapIt(it.VertexID):
let key = be.kMap.getOrDefault(vid, VOID_HASH_KEY) let key = be.kMap.getOrVoid vid
if key.isValid: if key.isValid:
yield (n, vid, key) yield (n, vid, key)
@ -371,7 +371,7 @@ iterator walk*(
n.inc n.inc
for (_,vid,key) in be.walkKey: for (_,vid,key) in be.walkKey:
yield (n, KeyPfx, vid.uint64, key.to(Blob)) yield (n, KeyPfx, vid.uint64, @key)
n.inc n.inc
if not be.noFq: if not be.noFq:
@ -34,7 +34,7 @@ import
../aristo_constants, ../aristo_constants,
../aristo_desc, ../aristo_desc,
../aristo_desc/desc_backend, ../aristo_desc/desc_backend,
../aristo_transcode, ../aristo_blobify,
./init_common, ./init_common,
./rocks_db/[rdb_desc, rdb_get, rdb_init, rdb_put, rdb_walk] ./rocks_db/[rdb_desc, rdb_get, rdb_init, rdb_put, rdb_walk]
@ -124,9 +124,9 @@ proc getKeyFn(db: RdbBackendRef): GetKeyFn =
# Decode data record # Decode data record
if 0 < rc.value.len: if 0 < rc.value.len:
var key: HashKey let lid = HashKey.fromBytes(rc.value).valueOr:
if key.init rc.value: return err(RdbHashKeyExpected)
return ok key return ok lid
err(GetKeyNotFound) err(GetKeyNotFound)
@ -224,7 +224,7 @@ proc putKeyFn(db: RdbBackendRef): PutKeyFn =
if hdl.error.isNil: if hdl.error.isNil:
for (vid,key) in vkps: for (vid,key) in vkps:
if key.isValid: if key.isValid:
hdl.keyCache = (vid, key.to(Blob)) hdl.keyCache = (vid, @key)
else: else:
hdl.keyCache = (vid, EmptyBlob) hdl.keyCache = (vid, EmptyBlob)
@ -402,9 +402,9 @@ iterator walkKey*(
): tuple[n: int, vid: VertexID, key: HashKey] = ): tuple[n: int, vid: VertexID, key: HashKey] =
## Variant of `walk()` iteration over the Merkle hash sub-table. ## Variant of `walk()` iteration over the Merkle hash sub-table.
for (n, xid, data) in be.rdb.walk KeyPfx: for (n, xid, data) in be.rdb.walk KeyPfx:
var hashKey: HashKey let lid = HashKey.fromBytes(data).valueOr:
if hashKey.init data: continue
yield (n, VertexID(xid), hashKey) yield (n, VertexID(xid), lid)
iterator walkFil*( iterator walkFil*(
be: RdbBackendRef; be: RdbBackendRef;
@ -29,8 +29,9 @@ import
chronicles, chronicles,
eth/[common, trie/nibbles], eth/[common, trie/nibbles],
results, results,
stew/keyed_queue,
../../sync/protocol/snap/snap_types, ../../sync/protocol/snap/snap_types,
"."/[aristo_desc, aristo_get, aristo_hike, aristo_path, aristo_transcode, "."/[aristo_desc, aristo_get, aristo_hike, aristo_path, aristo_serialise,
aristo_vid] aristo_vid]
logScope: logScope:
@ -459,7 +460,7 @@ proc updatePayload(
hike: Hike; # No path legs hike: Hike; # No path legs
leafTie: LeafTie; # Leaf item to add to the database leafTie: LeafTie; # Leaf item to add to the database
payload: PayloadRef; # Payload value payload: PayloadRef; # Payload value
): Result[Hike,AristoError] = ): Result[Hike,AristoError] =
## Update leaf vertex if payloads differ ## Update leaf vertex if payloads differ
let vtx = hike.legs[^1].wp.vtx let vtx = hike.legs[^1].wp.vtx
@ -484,11 +485,11 @@ proc updatePayload(
proc mergeNodeImpl( proc mergeNodeImpl(
db: AristoDbRef; # Database, top layer db: AristoDbRef; # Database, top layer
hashKey: HashKey; # Merkle hash of node hashKey: HashKey; # Merkle hash or small node data
node: NodeRef; # Node derived from RLP representation node: NodeRef; # Node derived from RLP representation
rootVid: VertexID; # Current sub-trie rootVid: VertexID; # Current sub-trie
): Result[VertexID,AristoError] = ): Result[void,AristoError] =
## The function merges the argument hash key `hashKey` as expanded from the ## The function merges the argument hash key `hashKey` as expanded from the
## node RLP representation into the `Aristo Trie` database. The vertex is ## node RLP representation into the `Aristo Trie` database. The vertex is
## split off from the node and stored separately. So are the Merkle hashes. ## split off from the node and stored separately. So are the Merkle hashes.
## The vertex is labelled `locked`. ## The vertex is labelled `locked`.
@ -497,8 +498,19 @@ proc mergeNodeImpl(
## allocated, already. If the node comes straight from the `decode()` RLP ## allocated, already. If the node comes straight from the `decode()` RLP
## decoder as expected, these vertex IDs will be all zero. ## decoder as expected, these vertex IDs will be all zero.
## ##
if node.error != AristoError(0): ## This function expects that the parent for the argument node has already
return err(node.error) ## been installed, i.e. the top layer cache mapping
##
## pAmk: {HashKey} -> {{VertexID}}
##
## has a result for the argument `node`. Also, the inverse top layer cache
## mapping
##
## sTab: {VertexID} -> {VertexRef}
##
## has no result for all images of the argument `node` under `pAmk`:
##
doAssert node.error == AristoError(0)
if not rootVid.isValid: if not rootVid.isValid:
return err(MergeRootKeyInvalid) return err(MergeRootKeyInvalid)
@ -511,13 +523,21 @@ proc mergeNodeImpl(
# order `root->.. ->leaf`. # order `root->.. ->leaf`.
let let
hashLbl = HashLabel(root: rootVid, key: hashKey) hashLbl = HashLabel(root: rootVid, key: hashKey)
vid = db.top.pAmk.getOrVoid hashLbl vids = db.top.pAmk.getOrVoid(hashLbl).toSeq
if not vid.isValid: isRoot = rootVid in vids
if vids.len == 0:
return err(MergeRevVidMustHaveBeenCached) return err(MergeRevVidMustHaveBeenCached)
if isRoot and 1 < vids.len:
# There can only be one root.
return err(MergeHashKeyRevLookUpGarbled)
let lbl = db.top.kMap.getOrVoid vid # Use the first vertex ID from the `vids` list as representative for all others
let lbl = db.top.kMap.getOrVoid vids[0]
if lbl == hashLbl: if lbl == hashLbl:
if db.top.sTab.hasKey vid: if db.top.sTab.hasKey vids[0]:
for n in 1 ..< vids.len:
if not db.top.sTab.hasKey vids[n]:
return err(MergeHashKeyRevLookUpGarbled)
# This is typically considered OK # This is typically considered OK
return err(MergeHashKeyCachedAlready) return err(MergeHashKeyCachedAlready)
# Otherwise proceed # Otherwise proceed
@ -525,13 +545,27 @@ proc mergeNodeImpl(
# Different key assigned => error # Different key assigned => error
return err(MergeHashKeyDiffersFromCached) return err(MergeHashKeyDiffersFromCached)
let (vtx, hasVtx) = block: # While the vertex referred to by `vids[0]` does not exist in the top layer
let vty = db.getVtx vid # cache it may well be in some lower layers or the backend. This typically
# happens for the root node.
var (vtx, hasVtx) = block:
let vty = db.getVtx vids[0]
if vty.isValid: if vty.isValid:
(vty, true) (vty, true)
else: else:
(node.to(VertexRef), false) (node.to(VertexRef), false)
# Verify that all `vids` entries are similar
for n in 1 ..< vids.len:
let w = vids[n]
if lbl != db.top.kMap.getOrVoid(w) or db.top.sTab.hasKey(w):
return err(MergeHashKeyRevLookUpGarbled)
if not hasVtx:
# Prefer existing node which has all links available, already.
let u = db.getVtx w
if u.isValid:
(vtx, hasVtx) = (u, true)
# The `vertexID <-> hashLabel` mappings need to be set up now (if any) # The `vertexID <-> hashLabel` mappings need to be set up now (if any)
case node.vType: case node.vType:
of Leaf: of Leaf:
@ -539,37 +573,30 @@ proc mergeNodeImpl(
of Extension: of Extension:
if node.key[0].isValid: if node.key[0].isValid:
let eLbl = HashLabel(root: rootVid, key: node.key[0]) let eLbl = HashLabel(root: rootVid, key: node.key[0])
if hasVtx: if not hasVtx:
if not vtx.eVid.isValid: # Brand new reverse lookup link for this vertex
return err(MergeNodeVtxDiffersFromExisting) vtx.eVid = db.vidAttach eLbl
db.top.pAmk[eLbl] = vtx.eVid elif not vtx.eVid.isValid:
else: return err(MergeNodeVtxDiffersFromExisting)
let eVid = db.top.pAmk.getOrVoid eLbl db.top.pAmk.append(eLbl, vtx.eVid)
if eVid.isValid:
vtx.eVid = eVid
else:
vtx.eVid = db.vidAttach eLbl
of Branch: of Branch:
for n in 0..15: for n in 0..15:
if node.key[n].isValid: if node.key[n].isValid:
let bLbl = HashLabel(root: rootVid, key: node.key[n]) let bLbl = HashLabel(root: rootVid, key: node.key[n])
if hasVtx: if not hasVtx:
if not vtx.bVid[n].isValid: # Brand new reverse lookup link for this vertex
return err(MergeNodeVtxDiffersFromExisting) vtx.bVid[n] = db.vidAttach bLbl
db.top.pAmk[bLbl] = vtx.bVid[n] elif not vtx.bVid[n].isValid:
else: return err(MergeNodeVtxDiffersFromExisting)
let bVid = db.top.pAmk.getOrVoid bLbl db.top.pAmk.append(bLbl, vtx.bVid[n])
if bVid.isValid:
vtx.bVid[n] = bVid
else:
vtx.bVid[n] = db.vidAttach bLbl
db.top.pPrf.incl vid for w in vids:
if not hasVtx or db.getKey(vid) != hashKey: db.top.pPrf.incl w
db.top.sTab[vid] = vtx if not hasVtx or db.getKey(w) != hashKey:
db.top.dirty = true # Modified top level cache db.top.sTab[w] = vtx.dup
db.top.dirty = true # Modified top level cache
ok vid ok()
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Public functions # Public functions
@ -645,25 +672,22 @@ proc merge*(
): Result[bool,AristoError] = ): Result[bool,AristoError] =
## Variant of `merge()` for `(root,path)` arguments instead of a `LeafTie` ## Variant of `merge()` for `(root,path)` arguments instead of a `LeafTie`
## object. ## object.
let lty = LeafTie(root: root, path: ? path.initNibbleRange.pathToTag) let lty = LeafTie(root: root, path: ? path.pathToTag)
db.merge(lty, payload).to(typeof result) db.merge(lty, payload).to(typeof result)
proc merge*( proc merge*(
db: AristoDbRef; # Database, top layer db: AristoDbRef; # Database, top layer
root: VertexID; # MPT state root root: VertexID; # MPT state root
path: openArray[byte]; # Leaf item to add to the database path: openArray[byte]; # Leaf item to add to the database
data: openArray[byte]; # Payload value data: openArray[byte]; # Raw data payload value
): Result[bool,AristoError] = ): Result[bool,AristoError] =
## Variant of `merge()` for `(root,path)` arguments instead of a `LeafTie` ## Variant of `merge()` for `(root,path)` arguments instead of a `LeafTie`.
## object. The payload argument `data` will be stored as `RlpData` if ## The argument `data` is stored as-is as a `RawData` payload value.
## the `root` argument is `VertexID(1)`, and as `RawData` otherwise. db.merge(root, path, PayloadRef(pType: RawData, rawBlob: @data))
let pyl = if root == VertexID(1): PayloadRef(pType: RlpData, rlpBlob: @data)
else: PayloadRef(pType: RawData, rawBlob: @data)
db.merge(root, path, pyl)
proc merge*( proc merge*(
db: AristoDbRef; # Database, top layer db: AristoDbRef; # Database, top layer
leaf: LeafTiePayload # Leaf item to add to the database leaf: LeafTiePayload; # Leaf item to add to the database
): Result[bool,AristoError] = ): Result[bool,AristoError] =
## Variant of `merge()`. This function will not indicate if the leaf ## Variant of `merge()`. This function will not indicate if the leaf
## was cached, already. ## was cached, already.
@ -691,7 +715,7 @@ proc merge*(
path: PathID; # Path into database path: PathID; # Path into database
rlpData: openArray[byte]; # RLP encoded payload data rlpData: openArray[byte]; # RLP encoded payload data
): Result[bool,AristoError] = ): Result[bool,AristoError] =
## Variant of `merge()` for storing a single item with implicte state root ## Variant of `merge()` for storing a single item with implicit state root
## argument `VertexID(1)`. ## argument `VertexID(1)`.
## ##
db.merge( db.merge(
@ -714,6 +738,19 @@ proc merge*(
## into the `Aristo Trie` database. This function is intended to be used with ## into the `Aristo Trie` database. This function is intended to be used with
## the proof nodes as returned by `snap/1` messages. ## the proof nodes as returned by `snap/1` messages.
## ##
proc update(
seen: var Table[HashKey,NodeRef];
todo: var KeyedQueueNV[NodeRef];
key: HashKey;
) {.gcsafe, raises: [RlpError].} =
## Check for embedded nodes, i.e. fully encoded node instead of a hash
if key.isValid and key.len < 32:
let lid = @key.digestTo(HashKey)
if not seen.hasKey lid:
let node = @key.decode(NodeRef)
discard todo.append node
seen[lid] = node
if not rootVid.isValid: if not rootVid.isValid:
return (0,0,MergeRootVidInvalid) return (0,0,MergeRootVidInvalid)
let rootKey = db.getKey rootVid let rootKey = db.getKey rootVid
@ -725,9 +762,25 @@ proc merge*(
for w in proof: for w in proof:
let let
key = w.Blob.digestTo(HashKey) key = w.Blob.digestTo(HashKey)
node = w.Blob.decode(NodeRef) node = rlp.decode(w.Blob,NodeRef)
if node.error != AristoError(0):
return (0,0,node.error)
nodeTab[key] = node nodeTab[key] = node
# Check for embedded nodes, i.e. fully encoded node instead of a hash
var embNodes: KeyedQueueNV[NodeRef]
discard embNodes.append node
while true:
let node = embNodes.shift.valueOr: break
case node.vType:
of Leaf:
discard
of Branch:
for n in 0 .. 15:
nodeTab.update(embNodes, node.key[n])
of Extension:
nodeTab.update(embNodes, node.key[0])
# Create a table with back links # Create a table with back links
var var
backLink: Table[HashKey,HashKey] backLink: Table[HashKey,HashKey]
@ -761,7 +814,7 @@ proc merge*(
nodeKey = w nodeKey = w
while nodeKey.isValid and nodeTab.hasKey nodeKey: while nodeKey.isValid and nodeTab.hasKey nodeKey:
chain.add nodeKey chain.add nodeKey
nodeKey = backLink.getOrDefault(nodeKey, VOID_HASH_KEY) nodeKey = backLink.getOrVoid nodeKey
if 0 < chain.len and chain[^1] == rootKey: if 0 < chain.len and chain[^1] == rootKey:
chains.add chain chains.add chain
@ -769,9 +822,9 @@ proc merge*(
block: block:
let let
lbl = HashLabel(root: rootVid, key: rootKey) lbl = HashLabel(root: rootVid, key: rootKey)
vid = db.top.pAmk.getOrVoid lbl vids = db.top.pAmk.getOrVoid lbl
if not vid.isvalid: if not vids.isValid:
db.top.pAmk[lbl] = rootVid db.top.pAmk.append(lbl, rootVid)
db.top.dirty = true # Modified top level cache db.top.dirty = true # Modified top level cache
# Process over chains in reverse mode starting with the root node. This # Process over chains in reverse mode starting with the root node. This
@ -782,13 +835,9 @@ proc merge*(
# Process the root ID which is common to all chains # Process the root ID which is common to all chains
for chain in chains: for chain in chains:
for key in chain.reversed: for key in chain.reversed:
if key in seen: if key notin seen:
discard
else:
seen.incl key seen.incl key
let let rc = db.mergeNodeImpl(key, nodeTab.getOrVoid key, rootVid)
node = nodeTab.getOrDefault(key, NodeRef(nil))
rc = db.mergeNodeImpl(key, node, rootVid)
if rc.isOK: if rc.isOK:
merged.inc merged.inc
elif rc.error == MergeHashKeyCachedAlready: elif rc.error == MergeHashKeyCachedAlready:
@ -800,7 +849,7 @@ proc merge*(
proc merge*( proc merge*(
db: AristoDbRef; # Database, top layer db: AristoDbRef; # Database, top layer
rootKey: HashKey; # Merkle hash for root rootKey: Hash256; # Merkle hash for root
rootVid = VertexID(0) # Optionally, force root vertex ID rootVid = VertexID(0) # Optionally, force root vertex ID
): Result[VertexID,AristoError] = ): Result[VertexID,AristoError] =
## Set up a `rootKey` associated with a vertex ID. ## Set up a `rootKey` associated with a vertex ID.
@ -820,28 +869,30 @@ proc merge*(
if not rootKey.isValid: if not rootKey.isValid:
return err(MergeRootKeyInvalid) return err(MergeRootKeyInvalid)
let rootLink = rootKey.to(HashKey)
if rootVid.isValid and rootVid != VertexID(1): if rootVid.isValid and rootVid != VertexID(1):
let key = db.getKey rootVid let key = db.getKey rootVid
if key == rootKey: if key.to(Hash256) == rootKey:
return ok rootVid return ok rootVid
if not key.isValid: if not key.isValid:
db.vidAttach(HashLabel(root: rootVid, key: rootKey), rootVid) db.vidAttach(HashLabel(root: rootVid, key: rootLink), rootVid)
return ok rootVid return ok rootVid
else: else:
let key = db.getKey VertexID(1) let key = db.getKey VertexID(1)
if key == rootKey: if key.to(Hash256) == rootKey:
return ok VertexID(1) return ok VertexID(1)
# Otherwise assign unless valid # Otherwise assign unless valid
if not key.isValid: if not key.isValid:
db.vidAttach(HashLabel(root: VertexID(1), key: rootKey), VertexID(1)) db.vidAttach(HashLabel(root: VertexID(1), key: rootLink), VertexID(1))
return ok VertexID(1) return ok VertexID(1)
# Create and assign a new root key # Create and assign a new root key
if not rootVid.isValid: if not rootVid.isValid:
let vid = db.vidFetch let vid = db.vidFetch
db.vidAttach(HashLabel(root: vid, key: rootKey), vid) db.vidAttach(HashLabel(root: vid, key: rootLink), vid)
return ok vid return ok vid
err(MergeRootKeyDiffersForVid) err(MergeRootKeyDiffersForVid)
@ -375,9 +375,9 @@ proc nearbyNextLeafTie(
if 0 < hike.legs.len: if 0 < hike.legs.len:
if hike.legs[^1].wp.vtx.vType != Leaf: if hike.legs[^1].wp.vtx.vType != Leaf:
return err((hike.legs[^1].wp.vid,NearbyLeafExpected)) return err((hike.legs[^1].wp.vid,NearbyLeafExpected))
let rc = hike.legsTo(NibblesSeq).pathToKey let rc = hike.legsTo(NibblesSeq).pathToTag
if rc.isOk: if rc.isOk:
return ok rc.value.to(PathID) return ok rc.value
return err((VertexID(0),rc.error)) return err((VertexID(0),rc.error))
err((VertexID(0),NearbyLeafExpected)) err((VertexID(0),NearbyLeafExpected))
@ -36,34 +36,51 @@ func pathPfxPad*(pfx: NibblesSeq; dblNibble: static[byte]): NibblesSeq
# Public functions # Public functions
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
func pathAsBlob*(keyOrTag: HashKey|PathID): Blob = func pathAsBlob*(tag: PathID): Blob =
keyOrTag.to(NibblesSeq).hexPrefixEncode(isLeaf=true) ## Convert the `tag` argument to a sequence of an even number of nibbles
## represented by a `Blob`. If the argument `tag` represents an odd number
## of nibbles, a zero nibble is appended.
##
## This function is useful only if there is a tacit agreement that all paths
## used to index database leaf values can be represented as `Blob`, i.e.
## `PathID` type paths with an even number of nibbles.
if 0 < tag.length:
let key = @(tag.pfx.UInt256.toBytesBE)
if 64 <= tag.length:
return key
else:
return key[0 ..< (tag.length + 1) div 2]
func pathToKey*(partPath: NibblesSeq): Result[HashKey,AristoError] = func pathAsHEP*(tag: PathID; isLeaf = false): Blob =
var key: ByteArray32 ## Convert the `tag` argument to a hex encoded partial path as used in `eth`
if partPath.len == 64: ## or `snap` protocol where full paths of nibble length 64 are encoded as 32
# Trailing dummy nibbles (aka no nibbles) force a nibble seq reorg ## byte `Blob` and non-leaf partial paths are *compact encoded* (i.e. per
let path = (partPath & EmptyNibbleSeq).getBytes() ## the Ethereum wire protocol.)
(addr key[0]).copyMem(unsafeAddr path[0], 32) if 64 <= tag.length:
return ok(key.HashKey) @(tag.pfx.UInt256.toBytesBE)
err(PathExpected64Nibbles) else:
tag.to(NibblesSeq).hexPrefixEncode(isLeaf=true)
func pathToKey*(
partPath: openArray[byte];
): Result[HashKey,AristoError] =
let (isLeaf,pathSegment) = partPath.hexPrefixDecode
if isleaf:
return pathSegment.pathToKey()
err(PathExpectedLeaf)
func pathToTag*(partPath: NibblesSeq): Result[PathID,AristoError] = func pathToTag*(partPath: NibblesSeq): Result[PathID,AristoError] =
## Nickname `tag` for `PathID` ## Convert the argument `partPath` to a `PathID` type value.
if partPath.len == 0:
return ok PathID()
if partPath.len <= 64: if partPath.len <= 64:
return ok PathID( return ok PathID(
pfx: UInt256.fromBytesBE partPath.pathPfxPad(0).getBytes(), pfx: UInt256.fromBytesBE partPath.pathPfxPad(0).getBytes(),
length: partPath.len.uint8) length: partPath.len.uint8)
err(PathAtMost64Nibbles) err(PathAtMost64Nibbles)
func pathToTag*(partPath: openArray[byte]): Result[PathID,AristoError] =
## Variant of `pathToTag()`
if partPath.len == 0:
return ok PathID()
if partPath.len <= 32:
return ok PathID(
pfx: UInt256.fromBytesBE @partPath & 0u8.repeat(32-partPath.len),
length: 2 * partPath.len.uint8)
err(PathAtMost64Nibbles)
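The byte and nibble variants agree where they overlap; a short sketch (path bytes invented):

    let tag = @[0x12'u8, 0x34].pathToTag.value   # 2 bytes => 4 nibbles
    doAssert tag.length == 4
    doAssert @[0x12'u8, 0x34].initNibbleRange.pathToTag.value == tag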
# -------------------- # --------------------
func pathPfxPad*(pfx: NibblesSeq; dblNibble: static[byte]): NibblesSeq = func pathPfxPad*(pfx: NibblesSeq; dblNibble: static[byte]): NibblesSeq =
@ -85,14 +102,6 @@ func pathPfxPad*(pfx: NibblesSeq; dblNibble: static[byte]): NibblesSeq =
let nope = seq[byte].default.initNibbleRange let nope = seq[byte].default.initNibbleRange
result = pfx.slice(0,64) & nope # nope forces re-alignment result = pfx.slice(0,64) & nope # nope forces re-alignment
func pathPfxPadKey*(pfx: NibblesSeq; dblNibble: static[byte]): HashKey =
## Variant of `pathPfxPad()`.
##
## Extend (or cut) the argument nibbles sequence `pfx` for generating a
## `HashKey`.
let bytes = pfx.pathPfxPad(dblNibble).getBytes
(addr result.ByteArray32[0]).copyMem(unsafeAddr bytes[0], bytes.len)
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# End # End
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
@ -0,0 +1,183 @@
# nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.
{.push raises: [].}
import
std/sequtils,
eth/[common, rlp, trie/nibbles],
results,
"."/[aristo_constants, aristo_desc, aristo_get]
# Annotation helper
{.pragma: noRaise, gcsafe, raises: [].}
type
ResolveVidFn = proc(vid: VertexID): Result[HashKey,AristoError] {.noRaise.}
## Resolve storage root vertex ID
# ------------------------------------------------------------------------------
# Private helper
# ------------------------------------------------------------------------------
proc aristoError(error: AristoError): NodeRef =
## Allows returning a decoder error embedded in a `NodeRef`
NodeRef(vType: Leaf, error: error)
proc serialise(
pyl: PayloadRef;
getKey: ResolveVidFn;
): Result[Blob,(VertexID,AristoError)] =
## Encode the data payload of the argument `pyl` as RLP `Blob` if it is of
## account type, otherwise pass the data as is.
##
case pyl.pType:
of RawData:
ok pyl.rawBlob
of RlpData:
ok pyl.rlpBlob
of AccountData:
let
vid = pyl.account.storageID
key = block:
if vid.isValid:
vid.getKey.valueOr:
let w = (vid,error)
return err(w)
else:
VOID_HASH_KEY
ok rlp.encode Account(
nonce: pyl.account.nonce,
balance: pyl.account.balance,
storageRoot: key.to(Hash256),
codeHash: pyl.account.codeHash)
# ------------------------------------------------------------------------------
# Public RLP transcoder mixins
# ------------------------------------------------------------------------------
proc read*(rlp: var Rlp; T: type NodeRef): T {.gcsafe, raises: [RlpError].} =
## Mixin for RLP reader, see `fromRlpRecord()` for a decoder with detailed
## error return code (if needed.) This reader is a jazzed up version which
## reports some particular errors in the `Dummy` type node.
if not rlp.isList:
# Otherwise `rlp.items` would raise a `Defect`
return aristoError(Rlp2Or17ListEntries)
var
blobs = newSeq[Blob](2) # temporary, cache
links: array[16,HashKey] # reconstruct branch node
top = 0 # count entries and positions
# Collect lists of either 2 or 17 blob entries.
for w in rlp.items:
case top
of 0, 1:
if not w.isBlob:
return aristoError(RlpBlobExpected)
blobs[top] = rlp.read(Blob)
of 2 .. 15:
let blob = rlp.read(Blob)
links[top] = HashKey.fromBytes(blob).valueOr:
return aristoError(RlpBranchHashKeyExpected)
of 16:
if not w.isBlob or 0 < rlp.read(Blob).len:
return aristoError(RlpEmptyBlobExpected)
else:
return aristoError(Rlp2Or17ListEntries)
top.inc
# Verify extension data
case top
of 2:
if blobs[0].len == 0:
return aristoError(RlpNonEmptyBlobExpected)
let (isLeaf, pathSegment) = hexPrefixDecode blobs[0]
if isLeaf:
return NodeRef(
vType: Leaf,
lPfx: pathSegment,
lData: PayloadRef(
pType: RawData,
rawBlob: blobs[1]))
else:
var node = NodeRef(
vType: Extension,
ePfx: pathSegment)
node.key[0] = HashKey.fromBytes(blobs[1]).valueOr:
return aristoError(RlpExtHashKeyExpected)
return node
of 17:
for n in [0,1]:
links[n] = HashKey.fromBytes(blobs[n]).valueOr:
return aristoError(RlpBranchHashKeyExpected)
return NodeRef(
vType: Branch,
key: links)
else:
discard
aristoError(Rlp2Or17ListEntries)
proc append*(writer: var RlpWriter; node: NodeRef) =
## Mixin for RLP writer. Note that a `Dummy` node is encoded as an empty
## list.
func addHashKey(w: var RlpWriter; key: HashKey) =
if 1 < key.len and key.len < 32:
w.appendRawBytes @key
else:
w.append @key
if node.error != AristoError(0):
writer.startList(0)
else:
case node.vType:
of Branch:
writer.startList(17)
for n in 0..15:
writer.addHashKey node.key[n]
writer.append EmptyBlob
of Extension:
writer.startList(2)
writer.append node.ePfx.hexPrefixEncode(isleaf = false)
writer.addHashKey node.key[0]
of Leaf:
proc getKey0(vid: VertexID): Result[HashKey,AristoError] {.noRaise.} =
ok(node.key[0]) # always succeeds
writer.startList(2)
writer.append node.lPfx.hexPrefixEncode(isleaf = true)
writer.append node.lData.serialise(getKey0).value
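The branching in `addHashKey()` is what realises the compact small-node links from the commit message: an embedded key is spliced in raw while a proper hash is appended as a 32 byte blob. A hedged round-trip sketch (leaf contents invented):

    let leaf = NodeRef(
      vType: Leaf,
      lPfx:  @[0x37'u8].initNibbleRange,
      lData: PayloadRef(pType: RawData, rawBlob: @[0xff'u8]))
    # the RLP encoding comes to 6 bytes, so the node is embedded, not hashed
    doAssert not leaf.digestTo(HashKey).isHash   # `digestTo()` as defined below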
# ---------------------
proc digestTo*(node: NodeRef; T: type HashKey): T =
## Convert the argument `node` to the corresponding Merkle hash key
rlp.encode(node).digestTo(HashKey)
proc serialise*(
db: AristoDbRef;
pyl: PayloadRef;
): Result[Blob,(VertexID,AristoError)] =
## Encode the data payload of the argument `pyl` as RLP `Blob` if it is of
## account type, otherwise pass the data as is.
##
proc getKey(vid: VertexID): Result[HashKey,AristoError] =
db.getKeyRc(vid)
pyl.serialise getKey
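For `RawData` and `RlpData` payloads this is a plain pass-through; only account payloads need the database to resolve the storage root. Minimal sketch, assuming some `db: AristoDbRef`:

    let pyl = PayloadRef(pType: RawData, rawBlob: @[1'u8, 2, 3])
    doAssert db.serialise(pyl).value == @[1'u8, 2, 3]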
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------
@ -0,0 +1,67 @@
# nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.
## Aristo DB -- Sign Helper
## ========================
##
{.push raises: [].}
import
eth/common,
results,
"."/[aristo_constants, aristo_desc, aristo_get, aristo_hashify, aristo_init,
aristo_merge, aristo_vid]
# ------------------------------------------------------------------------------
# Public functions, signature generator
# ------------------------------------------------------------------------------
proc merkleSignBegin*(): MerkleSignRef =
## Start signature calculator for a list of key-value items.
let
db = AristoDbRef.init VoidBackendRef
vid = db.vidFetch # => 2
MerkleSignRef(
root: vid,
db: db)
proc merkleSignAdd*(
sdb: MerkleSignRef;
key: openArray[byte];
val: openArray[byte];
) =
## Add key-value item to the signature list. The order of the items to add
## is irrelevant.
if sdb.error == AristoError(0):
sdb.count.inc
discard sdb.db.merge(sdb.root, key, val).valueOr:
sdb.`error` = error
sdb.errKey = @key
return
proc merkleSignCommit*(
sdb: MerkleSignRef;
): Result[HashKey,(Blob,AristoError)] =
## Finish with the list, calculate signature and return it.
if sdb.count == 0:
return ok VOID_HASH_KEY
if sdb.error != AristoError(0):
return err((sdb.errKey, sdb.error))
discard sdb.db.hashify().valueOr:
let w = (EmptyBlob, error[1])
return err(w)
let hash = sdb.db.getKeyRc(sdb.root).valueOr:
let w = (EmptyBlob, error)
return err(w)
ok hash
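Putting the three calls together, a minimal usage sketch of the signature calculator (key/value bytes invented):

    let sdb = merkleSignBegin()
    sdb.merkleSignAdd(@[1'u8, 2, 3], @[4'u8, 5])   # item order is irrelevant
    sdb.merkleSignAdd(@[6'u8, 7], @[8'u8])
    let sig = sdb.merkleSignCommit.value           # Merkle hash over the list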
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------
@ -92,10 +92,15 @@ proc toNode*(
vtx: VertexRef; # Vertex to convert vtx: VertexRef; # Vertex to convert
db: AristoDbRef; # Database, top layer db: AristoDbRef; # Database, top layer
stopEarly = true; # Full list of missing links if `false` stopEarly = true; # Full list of missing links if `false`
beKeyOk = false; # Allow fetching DB backend keys
): Result[NodeRef,seq[VertexID]] = ): Result[NodeRef,seq[VertexID]] =
## Convert the argument vertex `vtx` to a node type. Missing Merkle hash ## Convert the argument vertex `vtx` to a node type. Missing Merkle hash
## keys are searched for on the argument database `db`. ## keys are searched for on the argument database `db`.
## ##
## If backend keys are allowed by passing `beKeyOk` as `true`, small nodes
## are never embedded into their parent in compact form; the hash reference
## is always used instead.
##
## On error, at least the vertex ID of the first missing Merkle hash key is ## On error, at least the vertex ID of the first missing Merkle hash key is
## returned. If the argument `stopEarly` is set `false`, all missing Merkle ## returned. If the argument `stopEarly` is set `false`, all missing Merkle
## hash keys are returned. ## hash keys are returned.
@ -108,10 +113,13 @@ proc toNode*(
let vid = vtx.lData.account.storageID let vid = vtx.lData.account.storageID
if vid.isValid: if vid.isValid:
let key = db.getKey vid let key = db.getKey vid
if not key.isValid: if key.isValid:
node.key[0] = key
else:
return err(@[vid]) return err(@[vid])
node.key[0] = key node.key[0] = key
return ok node return ok node
of Branch: of Branch:
let node = NodeRef(vType: Branch, bVid: vtx.bVid) let node = NodeRef(vType: Branch, bVid: vtx.bVid)
var missing: seq[VertexID] var missing: seq[VertexID]
@ -121,24 +129,23 @@ proc toNode*(
let key = db.getKey vid let key = db.getKey vid
if key.isValid: if key.isValid:
node.key[n] = key node.key[n] = key
elif stopEarly:
return err(@[vid])
else: else:
missing.add vid missing.add vid
if stopEarly:
break
else:
node.key[n] = VOID_HASH_KEY
if 0 < missing.len: if 0 < missing.len:
return err(missing) return err(missing)
return ok node return ok node
of Extension: of Extension:
let let
vid = vtx.eVid vid = vtx.eVid
key = db.getKey vid key = db.getKey vid
if key.isValid: if not key.isValid:
let node = NodeRef(vType: Extension, ePfx: vtx.ePfx, eVid: vid) return err(@[vid])
node.key[0] = key let node = NodeRef(vType: Extension, ePfx: vtx.ePfx, eVid: vid)
return ok node node.key[0] = key
return err(@[vid]) return ok node
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# End # End
@ -97,7 +97,7 @@ proc vidReorg*(vGen: seq[VertexID]): seq[VertexID] =
proc vidAttach*(db: AristoDbRef; lbl: HashLabel; vid: VertexID) = proc vidAttach*(db: AristoDbRef; lbl: HashLabel; vid: VertexID) =
## Attach (i.e. register) a Merkle hash key to a vertex ID. ## Attach (i.e. register) a Merkle hash key to a vertex ID.
db.top.pAmk[lbl] = vid db.top.pAmk.append(lbl, vid)
db.top.kMap[vid] = lbl db.top.kMap[vid] = lbl
db.top.dirty = true # Modified top level cache db.top.dirty = true # Modified top level cache
@ -77,11 +77,10 @@ template mapRlpException(db: LegacyDbRef; info: static[string]; code: untyped) =
try: try:
code code
except RlpError as e: except RlpError as e:
return err(db.bless LegacyCoreDbError( return err(db.bless(RlpException, LegacyCoreDbError(
error: RlpException, ctx: info,
ctx: info, name: $e.name,
name: $e.name, msg: e.msg)))
msg: e.msg))
template reraiseRlpException(info: static[string]; code: untyped) = template reraiseRlpException(info: static[string]; code: untyped) =
try: try:
@ -183,7 +182,10 @@ proc kvtMethods(db: LegacyDbRef): CoreDbKvtFns =
db.bless(LegacyCoreDbKvtBE(tdb: tdb)), db.bless(LegacyCoreDbKvtBE(tdb: tdb)),
getFn: proc(k: openArray[byte]): CoreDbRc[Blob] = getFn: proc(k: openArray[byte]): CoreDbRc[Blob] =
ok(tdb.get(k)), let data = tdb.get(k)
if 0 < data.len:
return ok(data)
err(db.bless(KvtNotFound, LegacyCoreDbError(ctx: "getFn()"))),
delFn: proc(k: openArray[byte]): CoreDbRc[void] = delFn: proc(k: openArray[byte]): CoreDbRc[void] =
tdb.del(k) tdb.del(k)
@ -193,7 +195,7 @@ proc kvtMethods(db: LegacyDbRef): CoreDbKvtFns =
tdb.put(k,v) tdb.put(k,v)
ok(), ok(),
containsFn: proc(k: openArray[byte]): CoreDbRc[bool] = hasKeyFn: proc(k: openArray[byte]): CoreDbRc[bool] =
ok(tdb.contains(k)), ok(tdb.contains(k)),
pairsIt: iterator(): (Blob, Blob) = pairsIt: iterator(): (Blob, Blob) =
@ -207,21 +209,24 @@ proc mptMethods(mpt: HexaryChildDbRef; db: LegacyDbRef): CoreDbMptFns =
db.bless(LegacyCoreDbMptBE(mpt: mpt.trie)), db.bless(LegacyCoreDbMptBE(mpt: mpt.trie)),
fetchFn: proc(k: openArray[byte]): CoreDbRc[Blob] = fetchFn: proc(k: openArray[byte]): CoreDbRc[Blob] =
db.mapRlpException("legacy/mpt/get()"): db.mapRlpException("fetchFn()"):
return ok(mpt.trie.get(k)), let data = mpt.trie.get(k)
if 0 < data.len:
return ok(data)
err(db.bless(MptNotFound, LegacyCoreDbError(ctx: "fetchFn()"))),
deleteFn: proc(k: openArray[byte]): CoreDbRc[void] = deleteFn: proc(k: openArray[byte]): CoreDbRc[void] =
db.mapRlpException("legacy/mpt/del()"): db.mapRlpException("deleteFn()"):
mpt.trie.del(k) mpt.trie.del(k)
ok(), ok(),
mergeFn: proc(k: openArray[byte]; v: openArray[byte]): CoreDbRc[void] = mergeFn: proc(k: openArray[byte]; v: openArray[byte]): CoreDbRc[void] =
db.mapRlpException("legacy/mpt/put()"): db.mapRlpException("mergeFn()"):
mpt.trie.put(k,v) mpt.trie.put(k,v)
ok(), ok(),
containsFn: proc(k: openArray[byte]): CoreDbRc[bool] = hasPathFn: proc(k: openArray[byte]): CoreDbRc[bool] =
db.mapRlpException("legacy/mpt/put()"): db.mapRlpException("hasPathFn()"):
return ok(mpt.trie.contains(k)), return ok(mpt.trie.contains(k)),
rootVidFn: proc(): CoreDbVidRef = rootVidFn: proc(): CoreDbVidRef =
@ -231,15 +236,14 @@ proc mptMethods(mpt: HexaryChildDbRef; db: LegacyDbRef): CoreDbMptFns =
mpt.trie.isPruning, mpt.trie.isPruning,
pairsIt: iterator: (Blob,Blob) {.gcsafe, raises: [LegacyApiRlpError].} = pairsIt: iterator: (Blob,Blob) {.gcsafe, raises: [LegacyApiRlpError].} =
reraiseRlpException("legacy/mpt/pairs()"): reraiseRlpException("pairsIt()"):
for k,v in mpt.trie.pairs(): for k,v in mpt.trie.pairs():
yield (k,v), yield (k,v),
replicateIt: iterator: (Blob,Blob) {.gcsafe, raises: [LegacyApiRlpError].} = replicateIt: iterator: (Blob,Blob) {.gcsafe, raises: [LegacyApiRlpError].} =
reraiseRlpException("legacy/mpt/replicate()"): reraiseRlpException("replicateIt()"):
for k,v in mpt.trie.replicate(): for k,v in mpt.trie.replicate():
yield (k,v) yield (k,v))
)
proc accMethods(mpt: HexaryChildDbRef; db: LegacyDbRef): CoreDbAccFns = proc accMethods(mpt: HexaryChildDbRef; db: LegacyDbRef): CoreDbAccFns =
## Hexary trie database handlers ## Hexary trie database handlers
@ -248,22 +252,24 @@ proc accMethods(mpt: HexaryChildDbRef; db: LegacyDbRef): CoreDbAccFns =
db.bless(LegacyCoreDbAccBE(mpt: mpt.trie)), db.bless(LegacyCoreDbAccBE(mpt: mpt.trie)),
fetchFn: proc(k: EthAddress): CoreDbRc[CoreDbAccount] = fetchFn: proc(k: EthAddress): CoreDbRc[CoreDbAccount] =
const info = "legacy/mpt/getAccount()" db.mapRlpException "fetchFn()":
db.mapRlpException info: let data = mpt.trie.get(k.keccakHash.data)
return ok mpt.trie.get(k.keccakHash.data).toCoreDbAccount(db), if 0 < data.len:
return ok data.toCoreDbAccount(db)
err(db.bless(AccNotFound, LegacyCoreDbError(ctx: "fetchFn()"))),
deleteFn: proc(k: EthAddress): CoreDbRc[void] = deleteFn: proc(k: EthAddress): CoreDbRc[void] =
db.mapRlpException("legacy/mpt/del()"): db.mapRlpException("deleteFn()"):
mpt.trie.del(k.keccakHash.data) mpt.trie.del(k.keccakHash.data)
ok(), ok(),
mergeFn: proc(k: EthAddress; v: CoreDbAccount): CoreDbRc[void] = mergeFn: proc(k: EthAddress; v: CoreDbAccount): CoreDbRc[void] =
db.mapRlpException("legacy/mpt/put()"): db.mapRlpException("mergeFn()"):
mpt.trie.put(k.keccakHash.data, rlp.encode v.toAccount) mpt.trie.put(k.keccakHash.data, rlp.encode v.toAccount)
ok(), ok(),
containsFn: proc(k: EthAddress): CoreDbRc[bool] = hasPathFn: proc(k: EthAddress): CoreDbRc[bool] =
db.mapRlpException("legacy/mpt/put()"): db.mapRlpException("hasPath()"):
return ok(mpt.trie.contains k.keccakHash.data), return ok(mpt.trie.contains k.keccakHash.data),
rootVidFn: proc(): CoreDbVidRef = rootVidFn: proc(): CoreDbVidRef =
@ -344,7 +350,7 @@ proc baseMethods(
if createOk or tdb.contains(root.data): if createOk or tdb.contains(root.data):
return ok(db.bless LegacyCoreDbVid(vHash: root)) return ok(db.bless LegacyCoreDbVid(vHash: root))
err(db.bless LegacyCoreDbError(error: RootNotFound, ctx: "getRoot()")), err(db.bless(RootNotFound, LegacyCoreDbError(ctx: "getRoot()"))),
newKvtFn: proc(): CoreDxKvtRef = newKvtFn: proc(): CoreDxKvtRef =
db.kvt, db.kvt,
@ -1,5 +1,5 @@
# Nimbus # Nimbus
# Copyright (c) 2018 Status Research & Development GmbH # Copyright (c) 2018-2023 Status Research & Development GmbH
# Licensed under either of # Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0) # http://www.apache.org/licenses/LICENSE-2.0)
@ -16,6 +16,7 @@ import
eth/common, eth/common,
results, results,
"../.."/[constants, errors], "../.."/[constants, errors],
../aristo/aristo_constants, # `EmptyBlob`
./base/[base_desc, validate] ./base/[base_desc, validate]
export export
@ -45,7 +46,7 @@ else:
const AutoValidateDescriptors = true const AutoValidateDescriptors = true
const const
ProvideCoreDbLegacyAPI = true ProvideCoreDbLegacyAPI* = true # and false
EnableApiTracking = true and false EnableApiTracking = true and false
## When enabled, functions using this tracking facility need to import ## When enabled, functions using this tracking facility need to import
@ -106,34 +107,35 @@ template itNotImplemented(db: CoreDbRef, name: string) =
when EnableApiTracking: when EnableApiTracking:
import std/[sequtils, strutils], stew/byteutils import std/[sequtils, strutils], stew/byteutils
template newApiTxt(info: static[string]): static[string] =
logTxt "new API " & info
template legaApiTxt(info: static[string]): static[string] =
logTxt "legacy API " & info
func getParent(w: CoreDxChldRefs): auto = func getParent(w: CoreDxChldRefs): auto =
## Avoid an infinite call to `parent()` in `ifTrack*Api()` templates ## Avoid an infinite call to `parent()` in `ifTrack*Api()` templates
w.parent w.parent
template setTrackLegaApiOnly(w: CoreDbChldRefs|CoreDbRef) = when ProvideCoreDbLegacyAPI:
when typeof(w) is CoreDbRef: template legaApiTxt(info: static[string]): static[string] =
let db = w logTxt "legacy API " & info
else:
let db = w.distinctBase.getParent
let save = db.trackNewApi
# Prevent from cascaded logging
db.trackNewApi = false
defer: db.trackNewApi = save
template ifTrackLegaApi(w: CoreDbChldRefs|CoreDbRef; code: untyped) = template setTrackLegaApiOnly(w: CoreDbChldRefs|CoreDbRef) =
block:
when typeof(w) is CoreDbRef: when typeof(w) is CoreDbRef:
let db = w let db = w
else: else:
let db = w.distinctBase.getParent let db = w.distinctBase.getParent
if db.trackLegaApi: let save = db.trackNewApi
code # Prevent from cascaded logging
db.trackNewApi = false
defer: db.trackNewApi = save
template ifTrackLegaApi(w: CoreDbChldRefs|CoreDbRef; code: untyped) =
block:
when typeof(w) is CoreDbRef:
let db = w
else:
let db = w.distinctBase.getParent
if db.trackLegaApi:
code
template newApiTxt(info: static[string]): static[string] =
logTxt "new API " & info
template ifTrackNewApi(w: CoreDxChldRefs|CoreDbRef; code: untyped) = template ifTrackNewApi(w: CoreDxChldRefs|CoreDbRef; code: untyped) =
block: block:
@ -190,8 +192,9 @@ when EnableApiTracking:
proc toStr(rc: CoreDbRc[CoreDxCaptRef]): string = rc.toStr "captRef" proc toStr(rc: CoreDbRc[CoreDxCaptRef]): string = rc.toStr "captRef"
else: else:
template setTrackLegaApiOnly(w: CoreDbChldRefs|CoreDbRef) = discard when ProvideCoreDbLegacyAPI:
template ifTrackLegaApi(w: CoreDbChldRefs|CoreDbRef; code: untyped) = discard template setTrackLegaApiOnly(w: CoreDbChldRefs|CoreDbRef) = discard
template ifTrackLegaApi(w: CoreDbChldRefs|CoreDbRef; c: untyped) = discard
template ifTrackNewApi(w: CoreDxChldRefs|CoreDbRef; code: untyped) = discard template ifTrackNewApi(w: CoreDxChldRefs|CoreDbRef; code: untyped) = discard
# --------- # ---------
@ -214,9 +217,9 @@ func toCoreDxPhkRef(mpt: CoreDxMptRef): CoreDxPhkRef =
proc(k:openArray[byte]; v: openArray[byte]): CoreDbRc[void] = proc(k:openArray[byte]; v: openArray[byte]): CoreDbRc[void] =
mpt.methods.mergeFn(k.keccakHash.data, v) mpt.methods.mergeFn(k.keccakHash.data, v)
result.methods.containsFn = result.methods.hasPathFn =
proc(k: openArray[byte]): CoreDbRc[bool] = proc(k: openArray[byte]): CoreDbRc[bool] =
mpt.methods.containsFn(k.keccakHash.data) mpt.methods.hasPathFn(k.keccakHash.data)
result.methods.pairsIt = result.methods.pairsIt =
iterator(): (Blob, Blob) {.apiRaise.} = iterator(): (Blob, Blob) {.apiRaise.} =
@ -244,6 +247,7 @@ proc bless*(db: CoreDbRef): CoreDbRef =
db.ifTrackNewApi: info newApiTxt "CoreDbRef.init()", dbType=db.dbType db.ifTrackNewApi: info newApiTxt "CoreDbRef.init()", dbType=db.dbType
db db
proc bless*(db: CoreDbRef; child: CoreDbVidRef): CoreDbVidRef = proc bless*(db: CoreDbRef; child: CoreDbVidRef): CoreDbVidRef =
## Complete sub-module descriptor, fill in `parent` and activate it. ## Complete sub-module descriptor, fill in `parent` and activate it.
child.parent = db child.parent = db
@ -252,6 +256,7 @@ proc bless*(db: CoreDbRef; child: CoreDbVidRef): CoreDbVidRef =
child.validate child.validate
child child
proc bless*(db: CoreDbRef; child: CoreDxKvtRef): CoreDxKvtRef = proc bless*(db: CoreDbRef; child: CoreDxKvtRef): CoreDxKvtRef =
## Complete sub-module descriptor, fill in `parent` and de-activate ## Complete sub-module descriptor, fill in `parent` and de-activate
## iterator for persistent database. ## iterator for persistent database.
@ -267,7 +272,7 @@ proc bless*(db: CoreDbRef; child: CoreDxKvtRef): CoreDxKvtRef =
child child
proc bless*[T: CoreDxTrieRelated | CoreDbErrorRef | CoreDbBackends]( proc bless*[T: CoreDxTrieRelated | CoreDbBackends](
db: CoreDbRef; db: CoreDbRef;
child: T; child: T;
): auto = ): auto =
@ -277,6 +282,18 @@ proc bless*[T: CoreDxTrieRelated | CoreDbErrorRef | CoreDbBackends](
child.validate child.validate
child child
proc bless*(
db: CoreDbRef;
error: CoreDbErrorCode;
child: CoreDbErrorRef;
): CoreDbErrorRef =
child.parent = db
child.error = error
when AutoValidateDescriptors:
child.validate
child
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Public main descriptor methods # Public main descriptor methods
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
@ -315,7 +332,7 @@ proc finish*(db: CoreDbRef; flush = false) =
proc `$$`*(e: CoreDbErrorRef): string = proc `$$`*(e: CoreDbErrorRef): string =
## Pretty print error symbol, note that this directive may have side effects ## Pretty print error symbol, note that this directive may have side effects
## as it calls a backend function. ## as it calls a backend function.
result = e.parent.methods.errorPrintFn(e) result = $e.error & "(" & e.parent.methods.errorPrintFn(e) & ")"
e.ifTrackNewApi: info newApiTxt "$$()", result e.ifTrackNewApi: info newApiTxt "$$()", result
proc hash*(vid: CoreDbVidRef): Result[Hash256,void] = proc hash*(vid: CoreDbVidRef): Result[Hash256,void] =
@ -372,7 +389,7 @@ proc getRoot*(
## This function is intended to open a virtual accounts trie database as in: ## This function is intended to open a virtual accounts trie database as in:
## :: ## ::
## proc openAccountLedger(db: CoreDbRef, rootHash: Hash256): CoreDxMptRef = ## proc openAccountLedger(db: CoreDbRef, rootHash: Hash256): CoreDxMptRef =
## let root = db.getRoot(rootHash).isOkOr: ## let root = db.getRoot(rootHash).valueOr:
## # some error handling ## # some error handling
## return ## return
## db.newAccMpt root ## db.newAccMpt root
@ -391,10 +408,21 @@ proc newKvt*(db: CoreDbRef): CoreDxKvtRef =
db.ifTrackNewApi: info newApiTxt "newKvt()" db.ifTrackNewApi: info newApiTxt "newKvt()"
proc get*(kvt: CoreDxKvtRef; key: openArray[byte]): CoreDbRc[Blob] = proc get*(kvt: CoreDxKvtRef; key: openArray[byte]): CoreDbRc[Blob] =
## This function always returns a non-empty `Blob` or an error code.
result = kvt.methods.getFn key result = kvt.methods.getFn key
kvt.ifTrackNewApi: kvt.ifTrackNewApi:
info newApiTxt "kvt/get()", key=key.toStr, result=result.toStr info newApiTxt "kvt/get()", key=key.toStr, result=result.toStr
proc getOrEmpty*(kvt: CoreDxKvtRef; key: openArray[byte]): CoreDbRc[Blob] =
## This function sort of mimics the behaviour of the legacy database
## returning an empty `Blob` if the argument `key` is not found on the
## database.
result = kvt.methods.getFn key
if result.isErr and result.error.error == KvtNotFound:
result = CoreDbRc[Blob].ok(EmptyBlob)
kvt.ifTrackNewApi:
info newApiTxt "kvt/getOrEmpty()", key=key.toStr, result=result.toStr
proc del*(kvt: CoreDxKvtRef; key: openArray[byte]): CoreDbRc[void] = proc del*(kvt: CoreDxKvtRef; key: openArray[byte]): CoreDbRc[void] =
result = kvt.methods.delFn key result = kvt.methods.delFn key
kvt.ifTrackNewApi: kvt.ifTrackNewApi:
@ -409,10 +437,11 @@ proc put*(
kvt.ifTrackNewApi: info newApiTxt "kvt/put()", kvt.ifTrackNewApi: info newApiTxt "kvt/put()",
key=key.toStr, val=val.toSeq.toStr, result=result.toStr key=key.toStr, val=val.toSeq.toStr, result=result.toStr
proc contains*(kvt: CoreDxKvtRef; key: openArray[byte]): CoreDbRc[bool] = proc hasKey*(kvt: CoreDxKvtRef; key: openArray[byte]): CoreDbRc[bool] =
result = kvt.methods.containsFn key ## Would be named `contains` if it returned `bool` rather than `Result[]`.
result = kvt.methods.hasKeyFn key
kvt.ifTrackNewApi: kvt.ifTrackNewApi:
info newApiTxt "kvt/contains()", key=key.toStr, result=result.toStr info newApiTxt "kvt/hasKey()", key=key.toStr, result=result.toStr
iterator pairs*(kvt: CoreDxKvtRef): (Blob, Blob) {.apiRaise.} = iterator pairs*(kvt: CoreDxKvtRef): (Blob, Blob) {.apiRaise.} =
## Iterator supported on memory DB (otherwise implementation dependent) ## Iterator supported on memory DB (otherwise implementation dependent)
@ -427,9 +456,16 @@ iterator pairs*(kvt: CoreDxKvtRef): (Blob, Blob) {.apiRaise.} =
proc newMpt*(db: CoreDbRef; root: CoreDbVidRef; prune = true): CoreDxMptRef = proc newMpt*(db: CoreDbRef; root: CoreDbVidRef; prune = true): CoreDxMptRef =
## Constructor, will defect on failure (note that the legacy backend ## Constructor, will defect on failure (note that the legacy backend
## always succeeds) ## always succeeds)
result = db.methods.newMptFn(root, prune).valueOr: raiseAssert $$error result = db.methods.newMptFn(root, prune).valueOr:
raiseAssert $$error
db.ifTrackNewApi: info newApiTxt "newMpt", root=root.toStr, prune db.ifTrackNewApi: info newApiTxt "newMpt", root=root.toStr, prune
proc newMpt*(db: CoreDbRef; prune = true): CoreDxMptRef =
## Shortcut for `db.newMpt CoreDbVidRef()`
result = db.methods.newMptFn(CoreDbVidRef(), prune).valueOr:
raiseAssert $$error
db.ifTrackNewApi: info newApiTxt "newMpt", prune
proc newAccMpt*(db: CoreDbRef; root: CoreDbVidRef; prune = true): CoreDxAccRef = proc newAccMpt*(db: CoreDbRef; root: CoreDbVidRef; prune = true): CoreDxAccRef =
## Similar to `newMpt()` for handling accounts. Although this sub-trie can ## Similar to `newMpt()` for handling accounts. Although this sub-trie can
## be emulated by means of `newMpt(..).toPhk()`, it is recommended using ## be emulated by means of `newMpt(..).toPhk()`, it is recommended using
@ -471,11 +507,21 @@ proc rootVid*(dsc: CoreDxTrieRefs | CoreDxAccRef): CoreDbVidRef =
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc fetch*(trie: CoreDxTrieRefs; key: openArray[byte]): CoreDbRc[Blob] = proc fetch*(trie: CoreDxTrieRefs; key: openArray[byte]): CoreDbRc[Blob] =
## Fetch data from the argument `trie` ## Fetch data from the argument `trie`. The function always returns a
## non-empty `Blob` or an error code.
result = trie.methods.fetchFn(key) result = trie.methods.fetchFn(key)
trie.ifTrackNewApi: trie.ifTrackNewApi:
info newApiTxt "trie/fetch()", key=key.toStr, result=result.toStr info newApiTxt "trie/fetch()", key=key.toStr, result=result.toStr
proc fetchOrEmpty*(trie: CoreDxTrieRefs; key: openArray[byte]): CoreDbRc[Blob] =
## This function returns an empty `Blob` if the argument `key` is not found
## on the database.
result = trie.methods.fetchFn(key)
if result.isErr and result.error.error == MptNotFound:
result = ok(EmptyBlob)
trie.ifTrackNewApi:
info newApiTxt "trie/fetch()", key=key.toStr, result=result.toStr
proc delete*(trie: CoreDxTrieRefs; key: openArray[byte]): CoreDbRc[void] = proc delete*(trie: CoreDxTrieRefs; key: openArray[byte]): CoreDbRc[void] =
result = trie.methods.deleteFn key result = trie.methods.deleteFn key
trie.ifTrackNewApi: trie.ifTrackNewApi:
@ -486,7 +532,7 @@ proc merge*(
key: openArray[byte]; key: openArray[byte];
val: openArray[byte]; val: openArray[byte];
): CoreDbRc[void] = ): CoreDbRc[void] =
when trie is CoreDbMptRef: when trie is CoreDxMptRef:
const info = "mpt/merge()" const info = "mpt/merge()"
else: else:
const info = "phk/merge()" const info = "phk/merge()"
@ -494,10 +540,11 @@ proc merge*(
trie.ifTrackNewApi: info newApiTxt info, trie.ifTrackNewApi: info newApiTxt info,
key=key.toStr, val=val.toSeq.toStr, result=result.toStr key=key.toStr, val=val.toSeq.toStr, result=result.toStr
proc contains*(trie: CoreDxTrieRefs; key: openArray[byte]): CoreDbRc[bool] = proc hasPath*(trie: CoreDxTrieRefs; key: openArray[byte]): CoreDbRc[bool] =
result = trie.methods.containsFn key ## Would be named `contains` if it returned `bool` rather than `Result[]`.
result = trie.methods.hasPathFn key
trie.ifTrackNewApi: trie.ifTrackNewApi:
info newApiTxt "trie/contains()", key=key.toStr, result=result.toStr info newApiTxt "trie/hasKey()", key=key.toStr, result=result.toStr
iterator pairs*(mpt: CoreDxMptRef): (Blob, Blob) {.apiRaise.} = iterator pairs*(mpt: CoreDxMptRef): (Blob, Blob) {.apiRaise.} =
## Trie traversal, only supported for `CoreDxMptRef` ## Trie traversal, only supported for `CoreDxMptRef`
@ -516,7 +563,7 @@ iterator replicate*(mpt: CoreDxMptRef): (Blob, Blob) {.apiRaise.} =
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc fetch*(acc: CoreDxAccRef; address: EthAddress): CoreDbRc[CoreDbAccount] = proc fetch*(acc: CoreDxAccRef; address: EthAddress): CoreDbRc[CoreDbAccount] =
## Fetch data from the argument `trie` ## Fetch data from the argument `trie`.
result = acc.methods.fetchFn address result = acc.methods.fetchFn address
acc.ifTrackNewApi: acc.ifTrackNewApi:
info newApiTxt "acc/fetch()", address=address.toStr, result=result.toStr info newApiTxt "acc/fetch()", address=address.toStr, result=result.toStr
@ -535,10 +582,11 @@ proc merge*(
acc.ifTrackNewApi: acc.ifTrackNewApi:
info newApiTxt "acc/merge()", address=address.toStr, result=result.toStr info newApiTxt "acc/merge()", address=address.toStr, result=result.toStr
proc contains*(acc: CoreDxAccRef; address: EthAddress): CoreDbRc[bool] = proc hasPath*(acc: CoreDxAccRef; address: EthAddress): CoreDbRc[bool] =
result = acc.methods.containsFn address ## Would be named `contains` if it returned `bool` rather than `Result[]`.
result = acc.methods.hasPathFn address
acc.ifTrackNewApi: acc.ifTrackNewApi:
info newApiTxt "acc/contains()", address=address.toStr, result=result.toStr info newApiTxt "acc/hasKey()", address=address.toStr, result=result.toStr
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Public transaction related methods # Public transaction related methods
@ -630,7 +678,7 @@ when ProvideCoreDbLegacyAPI:
proc get*(kvt: CoreDbKvtRef; key: openArray[byte]): Blob = proc get*(kvt: CoreDbKvtRef; key: openArray[byte]): Blob =
kvt.setTrackLegaApiOnly kvt.setTrackLegaApiOnly
const info = "kvt/get()" const info = "kvt/get()"
result = kvt.distinctBase.get(key).expect info result = kvt.distinctBase.getOrEmpty(key).expect info
kvt.ifTrackLegaApi: kvt.ifTrackLegaApi:
info legaApiTxt info, key=key.toStr, result=result.toStr info legaApiTxt info, key=key.toStr, result=result.toStr
@ -650,7 +698,7 @@ when ProvideCoreDbLegacyAPI:
proc contains*(kvt: CoreDbKvtRef; key: openArray[byte]): bool = proc contains*(kvt: CoreDbKvtRef; key: openArray[byte]): bool =
kvt.setTrackLegaApiOnly kvt.setTrackLegaApiOnly
const info = "kvt/contains()" const info = "kvt/contains()"
result = kvt.distinctBase.contains(key).expect info result = kvt.distinctBase.hasKey(key).expect info
kvt.ifTrackLegaApi: info legaApiTxt info, key=key.toStr, result kvt.ifTrackLegaApi: info legaApiTxt info, key=key.toStr, result
iterator pairs*(kvt: CoreDbKvtRef): (Blob, Blob) {.apiRaise.} = iterator pairs*(kvt: CoreDbKvtRef): (Blob, Blob) {.apiRaise.} =
@ -703,7 +751,7 @@ when ProvideCoreDbLegacyAPI:
proc get*(trie: CoreDbTrieRefs; key: openArray[byte]): Blob = proc get*(trie: CoreDbTrieRefs; key: openArray[byte]): Blob =
trie.setTrackLegaApiOnly trie.setTrackLegaApiOnly
const info = "trie/get()" const info = "trie/get()"
result = trie.distinctBase.fetch(key).expect "trie/get()" result = trie.distinctBase.fetchOrEmpty(key).expect "trie/get()"
trie.ifTrackLegaApi: trie.ifTrackLegaApi:
info legaApiTxt info, key=key.toStr, result=result.toStr info legaApiTxt info, key=key.toStr, result=result.toStr
@ -726,7 +774,7 @@ when ProvideCoreDbLegacyAPI:
proc contains*(trie: CoreDbTrieRefs; key: openArray[byte]): bool = proc contains*(trie: CoreDbTrieRefs; key: openArray[byte]): bool =
trie.setTrackLegaApiOnly trie.setTrackLegaApiOnly
const info = "trie/contains()" const info = "trie/contains()"
result = trie.distinctBase.contains(key).expect info result = trie.distinctBase.hasPath(key).expect info
trie.ifTrackLegaApi: info legaApiTxt info, key=key.toStr, result trie.ifTrackLegaApi: info legaApiTxt info, key=key.toStr, result
proc rootHash*(trie: CoreDbTrieRefs): Hash256 = proc rootHash*(trie: CoreDbTrieRefs): Hash256 =

View File

@ -39,10 +39,12 @@ type
codeHash*: Hash256 codeHash*: Hash256
CoreDbErrorCode* = enum CoreDbErrorCode* = enum
Unspecified = 0 Unset = 0
Unspecified
RlpException RlpException
KvtNotFound KvtNotFound
MptNotFound MptNotFound
AccNotFound
RootNotFound RootNotFound
CoreDbCaptFlags* {.pure.} = enum CoreDbCaptFlags* {.pure.} = enum
@ -101,7 +103,7 @@ type
CoreDbKvtDelFn* = proc(k: openArray[byte]): CoreDbRc[void] {.noRaise.} CoreDbKvtDelFn* = proc(k: openArray[byte]): CoreDbRc[void] {.noRaise.}
CoreDbKvtPutFn* = CoreDbKvtPutFn* =
proc(k: openArray[byte]; v: openArray[byte]): CoreDbRc[void] {.noRaise.} proc(k: openArray[byte]; v: openArray[byte]): CoreDbRc[void] {.noRaise.}
CoreDbKvtContainsFn* = proc(k: openArray[byte]): CoreDbRc[bool] {.noRaise.} CoreDbKvtHasKeyFn* = proc(k: openArray[byte]): CoreDbRc[bool] {.noRaise.}
CoreDbKvtPairsIt* = iterator(): (Blob,Blob) {.apiRaise.} CoreDbKvtPairsIt* = iterator(): (Blob,Blob) {.apiRaise.}
CoreDbKvtFns* = object CoreDbKvtFns* = object
@ -110,7 +112,7 @@ type
getFn*: CoreDbKvtGetFn getFn*: CoreDbKvtGetFn
delFn*: CoreDbKvtDelFn delFn*: CoreDbKvtDelFn
putFn*: CoreDbKvtPutFn putFn*: CoreDbKvtPutFn
containsFn*: CoreDbKvtContainsFn hasKeyFn*: CoreDbKvtHasKeyFn
pairsIt*: CoreDbKvtPairsIt pairsIt*: CoreDbKvtPairsIt
@ -128,7 +130,7 @@ type
proc(k: openArray[byte]; v: openArray[byte]): CoreDbRc[void] {.noRaise.} proc(k: openArray[byte]; v: openArray[byte]): CoreDbRc[void] {.noRaise.}
CoreDbMptMergeAccountFn* = CoreDbMptMergeAccountFn* =
proc(k: openArray[byte]; v: CoreDbAccount): CoreDbRc[void] {.noRaise.} proc(k: openArray[byte]; v: CoreDbAccount): CoreDbRc[void] {.noRaise.}
CoreDbMptContainsFn* = proc(k: openArray[byte]): CoreDbRc[bool] {.noRaise.} CoreDbMptHasPathFn* = proc(k: openArray[byte]): CoreDbRc[bool] {.noRaise.}
CoreDbMptRootVidFn* = proc(): CoreDbVidRef {.noRaise.} CoreDbMptRootVidFn* = proc(): CoreDbVidRef {.noRaise.}
CoreDbMptIsPruningFn* = proc(): bool {.noRaise.} CoreDbMptIsPruningFn* = proc(): bool {.noRaise.}
CoreDbMptPairsIt* = iterator(): (Blob,Blob) {.apiRaise.} CoreDbMptPairsIt* = iterator(): (Blob,Blob) {.apiRaise.}
@ -140,7 +142,7 @@ type
fetchFn*: CoreDbMptFetchFn fetchFn*: CoreDbMptFetchFn
deleteFn*: CoreDbMptDeleteFn deleteFn*: CoreDbMptDeleteFn
mergeFn*: CoreDbMptMergeFn mergeFn*: CoreDbMptMergeFn
containsFn*: CoreDbMptContainsFn hasPathFn*: CoreDbMptHasPathFn
rootVidFn*: CoreDbMptRootVidFn rootVidFn*: CoreDbMptRootVidFn
pairsIt*: CoreDbMptPairsIt pairsIt*: CoreDbMptPairsIt
replicateIt*: CoreDbMptReplicateIt replicateIt*: CoreDbMptReplicateIt
@ -155,7 +157,7 @@ type
CoreDbAccDeleteFn* = proc(k: EthAddress): CoreDbRc[void] {.noRaise.} CoreDbAccDeleteFn* = proc(k: EthAddress): CoreDbRc[void] {.noRaise.}
CoreDbAccMergeFn* = CoreDbAccMergeFn* =
proc(k: EthAddress; v: CoreDbAccount): CoreDbRc[void] {.noRaise.} proc(k: EthAddress; v: CoreDbAccount): CoreDbRc[void] {.noRaise.}
CoreDbAccContainsFn* = proc(k: EthAddress): CoreDbRc[bool] {.noRaise.} CoreDbAccHasPathFn* = proc(k: EthAddress): CoreDbRc[bool] {.noRaise.}
CoreDbAccRootVidFn* = proc(): CoreDbVidRef {.noRaise.} CoreDbAccRootVidFn* = proc(): CoreDbVidRef {.noRaise.}
CoreDbAccIsPruningFn* = proc(): bool {.noRaise.} CoreDbAccIsPruningFn* = proc(): bool {.noRaise.}
@ -165,7 +167,7 @@ type
fetchFn*: CoreDbAccFetchFn fetchFn*: CoreDbAccFetchFn
deleteFn*: CoreDbAccDeleteFn deleteFn*: CoreDbAccDeleteFn
mergeFn*: CoreDbAccMergeFn mergeFn*: CoreDbAccMergeFn
containsFn*: CoreDbAccContainsFn hasPathFn*: CoreDbAccHasPathFn
rootVidFn*: CoreDbAccRootVidFn rootVidFn*: CoreDbAccRootVidFn
isPruningFn*: CoreDbAccIsPruningFn isPruningFn*: CoreDbAccIsPruningFn

View File

@ -48,7 +48,7 @@ proc validateMethodsDesc(kvt: CoreDbKvtFns) =
doAssert not kvt.getFn.isNil doAssert not kvt.getFn.isNil
doAssert not kvt.delFn.isNil doAssert not kvt.delFn.isNil
doAssert not kvt.putFn.isNil doAssert not kvt.putFn.isNil
doAssert not kvt.containsFn.isNil doAssert not kvt.hasKeyFn.isNil
doAssert not kvt.pairsIt.isNil doAssert not kvt.pairsIt.isNil
proc validateMethodsDesc(fns: CoreDbMptFns) = proc validateMethodsDesc(fns: CoreDbMptFns) =
@ -56,7 +56,7 @@ proc validateMethodsDesc(fns: CoreDbMptFns) =
doAssert not fns.fetchFn.isNil doAssert not fns.fetchFn.isNil
doAssert not fns.deleteFn.isNil doAssert not fns.deleteFn.isNil
doAssert not fns.mergeFn.isNil doAssert not fns.mergeFn.isNil
doAssert not fns.containsFn.isNil doAssert not fns.hasPathFn.isNil
doAssert not fns.rootVidFn.isNil doAssert not fns.rootVidFn.isNil
doAssert not fns.isPruningFn.isNil doAssert not fns.isPruningFn.isNil
doAssert not fns.pairsIt.isNil doAssert not fns.pairsIt.isNil
@ -67,7 +67,7 @@ proc validateMethodsDesc(fns: CoreDbAccFns) =
doAssert not fns.fetchFn.isNil doAssert not fns.fetchFn.isNil
doAssert not fns.deleteFn.isNil doAssert not fns.deleteFn.isNil
doAssert not fns.mergeFn.isNil doAssert not fns.mergeFn.isNil
doAssert not fns.containsFn.isNil doAssert not fns.hasPathFn.isNil
doAssert not fns.rootVidFn.isNil doAssert not fns.rootVidFn.isNil
doAssert not fns.isPruningFn.isNil doAssert not fns.isPruningFn.isNil
@ -79,6 +79,7 @@ proc validateMethodsDesc(vid: CoreDbVidRef) =
doAssert vid.ready == true doAssert vid.ready == true
proc validateMethodsDesc(e: CoreDbErrorRef) = proc validateMethodsDesc(e: CoreDbErrorRef) =
doAssert e.error != CoreDbErrorCode(0)
doAssert not e.isNil doAssert not e.isNil
doAssert not e.parent.isNil doAssert not e.parent.isNil

View File

@ -8,6 +8,8 @@
# at your option. This file may not be copied, modified, or distributed except # at your option. This file may not be copied, modified, or distributed except
# according to those terms. # according to those terms.
## This file was renamed from `core_apps.nim`.
{.push raises: [].} {.push raises: [].}
import import

View File

@ -0,0 +1,914 @@
# Nimbus
# Copyright (c) 2018-2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
## Rewrite of `core_apps.nim` using the new `CoreDb` API. The original
## `core_apps.nim` was renamed `core_apps_legacy.nim`.
{.push raises: [].}
import
std/[algorithm, options, sequtils],
chronicles,
eth/[common, rlp],
results,
stew/byteutils,
"../.."/[errors, constants],
".."/[aristo, storage_types],
"."/base
logScope:
topics = "core_db-apps"
type
TransactionKey = tuple
blockNumber: BlockNumber
index: int
# ------------------------------------------------------------------------------
# Forward declarations
# ------------------------------------------------------------------------------
proc getBlockHeader*(
db: CoreDbRef;
n: BlockNumber;
output: var BlockHeader;
): bool
{.gcsafe, raises: [RlpError].}
proc getBlockHeader*(
db: CoreDbRef,
blockHash: Hash256;
): BlockHeader
{.gcsafe, raises: [BlockNotFound].}
proc getBlockHash*(
db: CoreDbRef;
n: BlockNumber;
output: var Hash256;
): bool
{.gcsafe, raises: [RlpError].}
proc addBlockNumberToHashLookup*(
db: CoreDbRef;
header: BlockHeader;
) {.gcsafe.}
proc getBlockHeader*(
db: CoreDbRef;
blockHash: Hash256;
output: var BlockHeader;
): bool
{.gcsafe.}
# Copied from `utils/utils` which cannot be imported here in order to
# avoid circular imports.
func hash(b: BlockHeader): Hash256
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
template logTxt(info: static[string]): static[string] =
"Core apps " & info
template discardRlpException(info: static[string]; code: untyped) =
try:
code
except RlpError as e:
warn logTxt info, error=($e.name), msg=e.msg
# ------------------------------------------------------------------------------
# Private iterators
# ------------------------------------------------------------------------------
iterator findNewAncestors(
db: CoreDbRef;
header: BlockHeader;
): BlockHeader
{.gcsafe, raises: [RlpError,BlockNotFound].} =
## Returns the chain leading up from the given header until the first
## ancestor it has in common with our canonical chain.
var h = header
var orig: BlockHeader
while true:
if db.getBlockHeader(h.blockNumber, orig) and orig.hash == h.hash:
break
yield h
if h.parentHash == GENESIS_PARENT_HASH:
break
else:
h = db.getBlockHeader(h.parentHash)
# ------------------------------------------------------------------------------
# Public iterators
# ------------------------------------------------------------------------------
iterator getBlockTransactionData*(
db: CoreDbRef;
transactionRoot: Hash256;
): seq[byte]
{.gcsafe, raises: [RlpError].} =
block body:
let root = db.getRoot(transactionRoot).valueOr:
warn logTxt "getBlockTransactionData()",
transactionRoot, action="getRoot()", `error`=($$error)
break body
let transactionDb = db.newMpt root
var transactionIdx = 0
while true:
let transactionKey = rlp.encode(transactionIdx)
let data = transactionDb.fetch(transactionKey).valueOr:
if error.error != MptNotFound:
warn logTxt "getBlockTransactionData()", transactionRoot,
transactionKey, action="fetch()", error=($$error)
break body
yield data
inc transactionIdx
iterator getBlockTransactions*(
db: CoreDbRef;
header: BlockHeader;
): Transaction
{.gcsafe, raises: [RlpError].} =
for encodedTx in db.getBlockTransactionData(header.txRoot):
yield rlp.decode(encodedTx, Transaction)
iterator getBlockTransactionHashes*(
db: CoreDbRef;
blockHeader: BlockHeader;
): Hash256
{.gcsafe, raises: [RlpError].} =
## Returns an iterable of the transaction hashes from the block specified
## by the given block header.
for encodedTx in db.getBlockTransactionData(blockHeader.txRoot):
let tx = rlp.decode(encodedTx, Transaction)
yield rlpHash(tx) # beware EIP-4844
iterator getWithdrawalsData*(
db: CoreDbRef;
withdrawalsRoot: Hash256;
): seq[byte]
{.gcsafe, raises: [RlpError].} =
block body:
let root = db.getRoot(withdrawalsRoot).valueOr:
warn logTxt "getWithdrawalsData()",
withdrawalsRoot, action="getRoot()", error=($$error)
break body
let wddb = db.newMpt root
var idx = 0
while true:
let wdKey = rlp.encode(idx)
let data = wddb.fetch(wdKey).valueOr:
if error.error != MptNotFound:
warn logTxt "getWithdrawalsData()",
withdrawalsRoot, wdKey, action="fetch()", error=($$error)
break body
yield data
inc idx
iterator getReceipts*(
db: CoreDbRef;
receiptRoot: Hash256;
): Receipt
{.gcsafe, raises: [RlpError].} =
block body:
let root = db.getRoot(receiptRoot).valueOr:
warn logTxt "getWithdrawalsData()",
receiptRoot, action="getRoot()", error=($$error)
break body
var receiptDb = db.newMpt root
var receiptIdx = 0
while true:
let receiptKey = rlp.encode(receiptIdx)
let receiptData = receiptDb.fetch(receiptKey).valueOr:
if error.error != MptNotFound:
warn logTxt "getWithdrawalsData()",
receiptRoot, receiptKey, action="hasKey()", error=($$error)
break body
yield rlp.decode(receiptData, Receipt)
inc receiptIdx
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
func hash(b: BlockHeader): Hash256 =
rlpHash(b)
proc removeTransactionFromCanonicalChain(
db: CoreDbRef;
transactionHash: Hash256;
) =
## Removes the transaction specified by the given hash from the canonical
## chain.
db.newKvt.del(transactionHashToBlockKey(transactionHash).toOpenArray).isOkOr:
warn logTxt "removeTransactionFromCanonicalChain()",
transactionHash, action="del()", error=($$error)
proc setAsCanonicalChainHead(
db: CoreDbRef;
headerHash: Hash256;
): seq[BlockHeader]
{.gcsafe, raises: [RlpError,BlockNotFound].} =
## Sets the header as the canonical chain HEAD.
let header = db.getBlockHeader(headerHash)
var newCanonicalHeaders = sequtils.toSeq(db.findNewAncestors(header))
reverse(newCanonicalHeaders)
for h in newCanonicalHeaders:
var oldHash: Hash256
if not db.getBlockHash(h.blockNumber, oldHash):
break
let oldHeader = db.getBlockHeader(oldHash)
for txHash in db.getBlockTransactionHashes(oldHeader):
db.removeTransactionFromCanonicalChain(txHash)
# TODO re-add txn to internal pending pool (only if local sender)
for h in newCanonicalHeaders:
db.addBlockNumberToHashLookup(h)
let canonicalHeadHash = canonicalHeadHashKey()
db.newKvt.put(canonicalHeadHash.toOpenArray, rlp.encode(headerHash)).isOkOr:
warn logTxt "setAsCanonicalChainHead()",
canonicalHeadHash, action="put()", error=($$error)
return newCanonicalHeaders
proc markCanonicalChain(
db: CoreDbRef;
header: BlockHeader;
headerHash: Hash256;
): bool
{.gcsafe, raises: [RlpError].} =
## mark this chain as canonical by adding block number to hash lookup
## down to forking point
var
currHash = headerHash
currHeader = header
# mark current header as canonical
let
kvt = db.newKvt()
key = blockNumberToHashKey(currHeader.blockNumber)
kvt.put(key.toOpenArray, rlp.encode(currHash)).isOkOr:
warn logTxt "markCanonicalChain()", key, action="put()", error=($$error)
return false
# it is a genesis block, done
if currHeader.parentHash == Hash256():
return true
# mark ancestor blocks as canonical too
currHash = currHeader.parentHash
if not db.getBlockHeader(currHeader.parentHash, currHeader):
return false
while currHash != Hash256():
let key = blockNumberToHashKey(currHeader.blockNumber)
let data = kvt.getOrEmpty(key.toOpenArray).valueOr:
warn logTxt "markCanonicalChain()", key, action="get()", error=($$error)
return false
if data.len == 0:
# not marked, mark it
kvt.put(key.toOpenArray, rlp.encode(currHash)).isOkOr:
warn logTxt "markCanonicalChain()", key, action="put()", error=($$error)
elif rlp.decode(data, Hash256) != currHash:
# replace prev chain
kvt.put(key.toOpenArray, rlp.encode(currHash)).isOkOr:
warn logTxt "markCanonicalChain()", key, action="put()", error=($$error)
else:
# forking point, done
break
if currHeader.parentHash == Hash256():
break
currHash = currHeader.parentHash
if not db.getBlockHeader(currHeader.parentHash, currHeader):
return false
return true
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc exists*(db: CoreDbRef, hash: Hash256): bool =
db.newKvt.hasKey(hash.data).valueOr:
warn logTxt "exisis()", hash, action="hasKey()", error=($$error)
return false
proc getBlockHeader*(
db: CoreDbRef;
blockHash: Hash256;
output: var BlockHeader;
): bool =
const info = "getBlockHeader()"
let data = db.newKvt.get(genericHashKey(blockHash).toOpenArray).valueOr:
if error.error != KvtNotFound:
warn logTxt info, blockHash, action="get()", error=($$error)
return false
discardRlpException info:
output = rlp.decode(data, BlockHeader)
return true
proc getBlockHeader*(
db: CoreDbRef,
blockHash: Hash256;
): BlockHeader =
## Returns the requested block header as specified by block hash.
##
## Raises BlockNotFound if it is not present in the db.
if not db.getBlockHeader(blockHash, result):
raise newException(
BlockNotFound, "No block with hash " & blockHash.data.toHex)
proc getHash(
db: CoreDbRef;
key: DbKey;
output: var Hash256;
): bool
{.gcsafe, raises: [RlpError].} =
let data = db.newKvt.get(key.toOpenArray).valueOr:
if error.error != KvtNotFound:
warn logTxt "getHash()", key, action="get()", error=($$error)
return false
output = rlp.decode(data, Hash256)
true
proc getCanonicalHead*(
db: CoreDbRef;
output: var BlockHeader;
): bool =
discardRlpException "getCanonicalHead()":
var headHash: Hash256
if db.getHash(canonicalHeadHashKey(), headHash) and
db.getBlockHeader(headHash, output):
return true
proc getCanonicalHead*(
db: CoreDbRef;
): BlockHeader
{.gcsafe, raises: [EVMError].} =
if not db.getCanonicalHead result:
raise newException(
CanonicalHeadNotFound, "No canonical head set for this chain")
proc getCanonicalHeaderHash*(
db: CoreDbRef;
): Hash256
{.gcsafe, raises: [RlpError].} =
discard db.getHash(canonicalHeadHashKey(), result)
proc getBlockHash*(
db: CoreDbRef;
n: BlockNumber;
output: var Hash256;
): bool =
## Return the block hash for the given block number.
db.getHash(blockNumberToHashKey(n), output)
proc getBlockHash*(
db: CoreDbRef;
n: BlockNumber;
): Hash256
{.gcsafe, raises: [RlpError,BlockNotFound].} =
## Return the block hash for the given block number.
if not db.getHash(blockNumberToHashKey(n), result):
raise newException(BlockNotFound, "No block hash for number " & $n)
proc getHeadBlockHash*(
db: CoreDbRef;
): Hash256
{.gcsafe, raises: [RlpError].} =
if not db.getHash(canonicalHeadHashKey(), result):
result = Hash256()
proc getBlockHeader*(
db: CoreDbRef;
n: BlockNumber;
output: var BlockHeader;
): bool =
## Returns the block header with the given number in the canonical chain.
var blockHash: Hash256
if db.getBlockHash(n, blockHash):
result = db.getBlockHeader(blockHash, output)
proc getBlockHeaderWithHash*(
db: CoreDbRef;
n: BlockNumber;
): Option[(BlockHeader, Hash256)]
{.gcsafe, raises: [RlpError].} =
## Returns the block header and its hash, with the given number in the
## canonical chain. The hash is returned to avoid recomputing it.
var hash: Hash256
if db.getBlockHash(n, hash):
# Note: this will throw if header is not present.
var header: BlockHeader
if db.getBlockHeader(hash, header):
return some((header, hash))
else:
# this should not happen, but if it happens let's fail loudly as this means
# something is super wrong
raiseAssert("Corrupted database. Mapping number->hash present, without header in database")
else:
return none[(BlockHeader, Hash256)]()
proc getBlockHeader*(
db: CoreDbRef;
n: BlockNumber;
): BlockHeader
{.gcsafe, raises: [RlpError,BlockNotFound].} =
## Returns the block header with the given number in the canonical chain.
## Raises BlockNotFound error if the block is not in the DB.
db.getBlockHeader(db.getBlockHash(n))
proc getScore*(
db: CoreDbRef;
blockHash: Hash256;
): UInt256
{.gcsafe, raises: [RlpError].} =
let data = db.newKvt.get(blockHashToScoreKey(blockHash).toOpenArray).valueOr:
if error.error != KvtNotFound:
warn logTxt "getScore()", blockHash, action="get()", error=($$error)
return
rlp.decode(data, UInt256)
proc setScore*(db: CoreDbRef; blockHash: Hash256, score: UInt256) =
## for testing purpose
let scoreKey = blockHashToScoreKey blockHash
db.newKvt.put(scoreKey.toOpenArray, rlp.encode(score)).isOkOr:
warn logTxt "setScore()", scoreKey, action="put()", error=($$error)
return
proc getTd*(db: CoreDbRef; blockHash: Hash256, td: var UInt256): bool =
const info = "getId()"
let bytes = db.newKvt.get(blockHashToScoreKey(blockHash).toOpenArray).valueOr:
if error.error != KvtNotFound:
warn logTxt info, blockHash, action="get()", error=($$error)
return false
discardRlpException info:
td = rlp.decode(bytes, UInt256)
return true
proc headTotalDifficulty*(
db: CoreDbRef;
): UInt256
{.gcsafe, raises: [RlpError].} =
# this is actually a combination of `getHash` and `getScore`
const
info = "headTotalDifficulty()"
key = canonicalHeadHashKey()
let
kvt = db.newKvt
data = kvt.get(key.toOpenArray).valueOr:
if error.error != KvtNotFound:
warn logTxt info, key, action="get()", error=($$error)
return 0.u256
blockHash = rlp.decode(data, Hash256)
numData = kvt.get(blockHashToScoreKey(blockHash).toOpenArray).valueOr:
warn logTxt info, blockHash, action="get()", error=($$error)
return 0.u256
rlp.decode(numData, UInt256)
proc getAncestorsHashes*(
db: CoreDbRef;
limit: UInt256;
header: BlockHeader;
): seq[Hash256]
{.gcsafe, raises: [BlockNotFound].} =
var ancestorCount = min(header.blockNumber, limit).truncate(int)
var h = header
result = newSeq[Hash256](ancestorCount)
while ancestorCount > 0:
h = db.getBlockHeader(h.parentHash)
result[ancestorCount - 1] = h.hash
dec ancestorCount
proc addBlockNumberToHashLookup*(db: CoreDbRef; header: BlockHeader) =
let blockNumberKey = blockNumberToHashKey(header.blockNumber)
db.newKvt.put(blockNumberKey.toOpenArray, rlp.encode(header.hash)).isOkOr:
warn logTxt "addBlockNumberToHashLookup()",
blockNumberKey, action="put()", error=($$error)
proc persistTransactions*(
db: CoreDbRef;
blockNumber: BlockNumber;
transactions: openArray[Transaction];
): Hash256
{.gcsafe, raises: [CatchableError].} =
const
info = "persistTransactions()"
let
mpt = db.newMpt()
kvt = db.newKvt()
for idx, tx in transactions:
let
encodedTx = rlp.encode(tx.removeNetworkPayload)
txHash = rlpHash(tx) # beware EIP-4844
blockKey = transactionHashToBlockKey(txHash)
txKey: TransactionKey = (blockNumber, idx)
mpt.merge(rlp.encode(idx), encodedTx).isOkOr:
warn logTxt info, idx, action="merge()", error=($$error)
return EMPTY_ROOT_HASH
kvt.put(blockKey.toOpenArray, rlp.encode(txKey)).isOkOr:
warn logTxt info, blockKey, action="put()", error=($$error)
return EMPTY_ROOT_HASH
mpt.rootVid.hash.valueOr:
warn logTxt info, action="hash()"
return EMPTY_ROOT_HASH
proc getTransaction*(
db: CoreDbRef;
txRoot: Hash256;
txIndex: int;
res: var Transaction;
): bool
{.gcsafe, raises: [RlpError].} =
const
info = "getTransaction()"
let
mpt = block:
let root = db.getRoot(txRoot).valueOr:
warn logTxt info, txRoot, action="getRoot()", `error`=($$error)
return false
db.newMpt root
txData = mpt.fetch(rlp.encode(txIndex)).valueOr:
if error.error != MptNotFound:
warn logTxt info, txIndex, action="fetch()", `error`=($$error)
return false
res = rlp.decode(txData, Transaction)
true
proc getTransactionCount*(
db: CoreDbRef;
txRoot: Hash256;
): int
{.gcsafe, raises: [RlpError].} =
const
info = "getTransactionCount()"
let mpt = block:
let root = db.getRoot(txRoot).valueOr:
warn logTxt info, txRoot, action="getRoot()", `error`=($$error)
return 0
db.newMpt root
var txCount = 0
while true:
let hasPath = mpt.hasPath(rlp.encode(txCount)).valueOr:
warn logTxt info, txCount, action="hasPath()", `error`=($$error)
return 0
if hasPath:
inc txCount
else:
return txCount
doAssert(false, "unreachable")
proc getUnclesCount*(
db: CoreDbRef;
ommersHash: Hash256;
): int
{.gcsafe, raises: [RlpError].} =
const info = "getUnclesCount()"
if ommersHash != EMPTY_UNCLE_HASH:
let encodedUncles = block:
let key = genericHashKey(ommersHash)
db.newKvt.get(key.toOpenArray).valueOr:
if error.error == KvtNotFound:
warn logTxt info, ommersHash, action="get()", `error`=($$error)
return 0
return rlpFromBytes(encodedUncles).listLen
proc getUncles*(
db: CoreDbRef;
ommersHash: Hash256;
): seq[BlockHeader]
{.gcsafe, raises: [RlpError].} =
const info = "getUncles()"
if ommersHash != EMPTY_UNCLE_HASH:
let encodedUncles = block:
let key = genericHashKey(ommersHash)
db.newKvt.get(key.toOpenArray).valueOr:
if error.error == KvtNotFound:
warn logTxt info, ommersHash, action="get()", `error`=($$error)
return @[]
return rlp.decode(encodedUncles, seq[BlockHeader])
proc persistWithdrawals*(
db: CoreDbRef;
withdrawals: openArray[Withdrawal];
): Hash256
{.gcsafe, raises: [CatchableError].} =
const info = "persistWithdrawals()"
let mpt = db.newMpt()
for idx, wd in withdrawals:
mpt.merge(rlp.encode(idx), rlp.encode(wd)).isOkOr:
warn logTxt info, idx, action="merge()", error=($$error)
return EMPTY_ROOT_HASH
mpt.rootVid.hash.valueOr:
warn logTxt info, action="hash()"
return EMPTY_ROOT_HASH
proc getWithdrawals*(
db: CoreDbRef;
withdrawalsRoot: Hash256;
): seq[Withdrawal]
{.gcsafe, raises: [RlpError].} =
for encodedWd in db.getWithdrawalsData(withdrawalsRoot):
result.add(rlp.decode(encodedWd, Withdrawal))
proc getBlockBody*(
db: CoreDbRef;
header: BlockHeader;
output: var BlockBody;
): bool
{.gcsafe, raises: [RlpError].} =
output.transactions = @[]
output.uncles = @[]
for encodedTx in db.getBlockTransactionData(header.txRoot):
output.transactions.add(rlp.decode(encodedTx, Transaction))
if header.withdrawalsRoot.isSome:
output.withdrawals = some(db.getWithdrawals(header.withdrawalsRoot.get))
if header.ommersHash != EMPTY_UNCLE_HASH:
let
key = genericHashKey(header.ommersHash)
encodedUncles = db.newKvt.get(key.toOpenArray).valueOr:
if error.error == KvtNotFound:
warn logTxt "getBlockBody()",
ommersHash=header.ommersHash, action="get()", `error`=($$error)
return false
output.uncles = rlp.decode(encodedUncles, seq[BlockHeader])
true
proc getBlockBody*(
db: CoreDbRef;
blockHash: Hash256;
output: var BlockBody;
): bool
{.gcsafe, raises: [RlpError].} =
var header: BlockHeader
if db.getBlockHeader(blockHash, header):
return db.getBlockBody(header, output)
proc getBlockBody*(
db: CoreDbRef;
hash: Hash256;
): BlockBody
{.gcsafe, raises: [RlpError,ValueError].} =
if not db.getBlockBody(hash, result):
raise newException(ValueError, "Error when retrieving block body")
proc getUncleHashes*(
db: CoreDbRef;
blockHashes: openArray[Hash256];
): seq[Hash256]
{.gcsafe, raises: [RlpError,ValueError].} =
for blockHash in blockHashes:
result &= db.getBlockBody(blockHash).uncles.mapIt(it.hash)
proc getUncleHashes*(
db: CoreDbRef;
header: BlockHeader;
): seq[Hash256]
{.gcsafe, raises: [RlpError].} =
if header.ommersHash != EMPTY_UNCLE_HASH:
let
key = genericHashKey(header.ommersHash)
encodedUncles = db.newKvt.get(key.toOpenArray).valueOr:
if error.error == KvtNotFound:
warn logTxt "getUncleHashes()",
ommersHash=header.ommersHash, action="get()", `error`=($$error)
return @[]
return rlp.decode(encodedUncles, seq[BlockHeader]).mapIt(it.hash)
proc getTransactionKey*(
db: CoreDbRef;
transactionHash: Hash256;
): tuple[blockNumber: BlockNumber, index: int]
{.gcsafe, raises: [RlpError].} =
let
txKey = transactionHashToBlockKey(transactionHash)
tx = db.newKvt.get(txKey.toOpenArray).valueOr:
if error.error == KvtNotFound:
warn logTxt "getTransactionKey()",
transactionHash, action="get()", `error`=($$error)
return (0.toBlockNumber, -1)
let key = rlp.decode(tx, TransactionKey)
(key.blockNumber, key.index)
proc headerExists*(db: CoreDbRef; blockHash: Hash256): bool =
## Returns True if the header with the given block hash is in our DB.
db.newKvt.hasKey(genericHashKey(blockHash).toOpenArray).valueOr:
warn logTxt "headerExists()", blockHash, action="get()", `error`=($$error)
return false
proc setHead*(
db: CoreDbRef;
blockHash: Hash256;
): bool
{.gcsafe, raises: [RlpError].} =
var header: BlockHeader
if not db.getBlockHeader(blockHash, header):
return false
if not db.markCanonicalChain(header, blockHash):
return false
let canonicalHeadHash = canonicalHeadHashKey()
db.newKvt.put(canonicalHeadHash.toOpenArray, rlp.encode(blockHash)).isOkOr:
warn logTxt "setHead()", canonicalHeadHash, action="put()", error=($$error)
return true
proc setHead*(
db: CoreDbRef;
header: BlockHeader;
writeHeader = false;
): bool
{.gcsafe, raises: [RlpError].} =
var headerHash = rlpHash(header)
let kvt = db.newKvt()
if writeHeader:
kvt.put(genericHashKey(headerHash).toOpenArray, rlp.encode(header)).isOkOr:
warn logTxt "setHead()", headerHash, action="put()", error=($$error)
return false
if not db.markCanonicalChain(header, headerHash):
return false
let canonicalHeadHash = canonicalHeadHashKey()
kvt.put(canonicalHeadHash.toOpenArray, rlp.encode(headerHash)).isOkOr:
warn logTxt "setHead()", canonicalHeadHash, action="put()", error=($$error)
return false
true
proc persistReceipts*(
db: CoreDbRef;
receipts: openArray[Receipt];
): Hash256
{.gcsafe, raises: [CatchableError].} =
const info = "persistReceipts()"
let mpt = db.newMpt()
for idx, rec in receipts:
mpt.merge(rlp.encode(idx), rlp.encode(rec)).isOkOr:
warn logTxt info, idx, action="merge()", error=($$error)
mpt.rootVid.hash.valueOr:
warn logTxt info, action="hash()"
return EMPTY_ROOT_HASH
proc getReceipts*(
db: CoreDbRef;
receiptRoot: Hash256;
): seq[Receipt]
{.gcsafe, raises: [RlpError].} =
var receipts = newSeq[Receipt]()
for r in db.getReceipts(receiptRoot):
receipts.add(r)
return receipts
proc persistHeaderToDb*(
db: CoreDbRef;
header: BlockHeader;
forceCanonical: bool;
startOfHistory = GENESIS_PARENT_HASH;
): seq[BlockHeader]
{.gcsafe, raises: [RlpError,EVMError].} =
let isStartOfHistory = header.parentHash == startOfHistory
let headerHash = header.blockHash
if not isStartOfHistory and not db.headerExists(header.parentHash):
raise newException(ParentNotFound, "Cannot persist block header " &
$headerHash & " with unknown parent " & $header.parentHash)
let kvt = db.newKvt()
kvt.put(genericHashKey(headerHash).toOpenArray, rlp.encode(header)).isOkOr:
warn logTxt "persistHeaderToDb()",
headerHash, action="put()", `error`=($$error)
return @[]
let score = if isStartOfHistory: header.difficulty
else: db.getScore(header.parentHash) + header.difficulty
let scoreKey = blockHashToScoreKey(headerHash)
kvt.put(scoreKey.toOpenArray, rlp.encode(score)).isOkOr:
warn logTxt "persistHeaderToDb()",
scoreKey, action="put()", `error`=($$error)
return @[]
db.addBlockNumberToHashLookup(header)
var canonHeader: BlockHeader
if not db.getCanonicalHead canonHeader:
return db.setAsCanonicalChainHead(headerHash)
let headScore = db.getScore(canonHeader.hash)
if score > headScore or forceCanonical:
return db.setAsCanonicalChainHead(headerHash)
proc persistHeaderToDbWithoutSetHead*(
db: CoreDbRef;
header: BlockHeader;
startOfHistory = GENESIS_PARENT_HASH;
) {.gcsafe, raises: [RlpError].} =
let isStartOfHistory = header.parentHash == startOfHistory
let headerHash = header.blockHash
let score = if isStartOfHistory: header.difficulty
else: db.getScore(header.parentHash) + header.difficulty
let
kvt = db.newKvt()
scoreKey = blockHashToScoreKey(headerHash)
kvt.put(scoreKey.toOpenArray, rlp.encode(score)).isOkOr:
warn logTxt "persistHeaderToDbWithoutSetHead()",
scoreKey, action="put()", `error`=($$error)
return
kvt.put(genericHashKey(headerHash).toOpenArray, rlp.encode(header)).isOkOr:
warn logTxt "persistHeaderToDbWithoutSetHead()",
headerHash, action="put()", `error`=($$error)
return
# FIXME-Adam: This seems like a bad idea. I don't see a way to get the score
# in stateless mode, but it seems dangerous to just shove the header into
# the DB *without* also storing the score.
proc persistHeaderToDbWithoutSetHeadOrScore*(db: CoreDbRef; header: BlockHeader) =
db.addBlockNumberToHashLookup(header)
let
kvt = db.newKvt()
blockHash = header.blockHash
kvt.put(genericHashKey(blockHash).toOpenArray, rlp.encode(header)).isOkOr:
warn logTxt "persistHeaderToDbWithoutSetHeadOrScore()",
blockHash, action="put()", `error`=($$error)
return
proc persistUncles*(db: CoreDbRef, uncles: openArray[BlockHeader]): Hash256 =
## Persists the list of uncles to the database.
## Returns the uncles hash.
let enc = rlp.encode(uncles)
result = keccakHash(enc)
db.newKvt.put(genericHashKey(result).toOpenArray, enc).isOkOr:
warn logTxt "persistUncles()",
unclesHash=result, action="put()", `error`=($$error)
return EMPTY_ROOT_HASH
proc safeHeaderHash*(
db: CoreDbRef;
): Hash256
{.gcsafe, raises: [RlpError].} =
discard db.getHash(safeHashKey(), result)
proc safeHeaderHash*(db: CoreDbRef, headerHash: Hash256) =
let safeHashKey = safeHashKey()
db.newKvt.put(safeHashKey.toOpenArray, rlp.encode(headerHash)).isOkOr:
warn logTxt "safeHeaderHash()",
safeHashKey, action="put()", `error`=($$error)
return
proc finalizedHeaderHash*(
db: CoreDbRef;
): Hash256
{.gcsafe, raises: [RlpError].} =
discard db.getHash(finalizedHashKey(), result)
proc finalizedHeaderHash*(db: CoreDbRef, headerHash: Hash256) =
let finalizedHashKey = finalizedHashKey()
db.newKvt.put(finalizedHashKey.toOpenArray, rlp.encode(headerHash)).isOkOr:
warn logTxt "finalizedHeaderHash()",
finalizedHashKey, action="put()", `error`=($$error)
return
proc safeHeader*(
db: CoreDbRef;
): BlockHeader
{.gcsafe, raises: [RlpError,BlockNotFound].} =
db.getBlockHeader(db.safeHeaderHash)
proc finalizedHeader*(
db: CoreDbRef;
): BlockHeader
{.gcsafe, raises: [RlpError,BlockNotFound].} =
db.getBlockHeader(db.finalizedHeaderHash)
proc haveBlockAndState*(db: CoreDbRef, headerHash: Hash256): bool =
var header: BlockHeader
if not db.getBlockHeader(headerHash, header):
return false
# see if stateRoot exists
db.exists(header.stateRoot)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -1,5 +1,5 @@
# Nimbus # Nimbus
# Copyright (c) 2018 Status Research & Development GmbH # Copyright (c) 2018-2023 Status Research & Development GmbH
# Licensed under either of # Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0) # http://www.apache.org/licenses/LICENSE-2.0)
@ -13,26 +13,30 @@
import import
std/options, std/options,
eth/[common, trie/db], eth/[common, trie/db],
./backend/[legacy_db], ../aristo,
"."/[base, core_apps] ./backend/legacy_db,
./base,
#./core_apps_legacy as core_apps
./core_apps_newapi as core_apps
export export
common, common,
core_apps, core_apps,
# Provide a standard interface for calculating merkle hash signatures,
# here by quoting `Aristo` functions.
MerkleSignRef,
merkleSignBegin,
merkleSignAdd,
merkleSignCommit,
to,
# Not all symbols from the object sources will be exported by default # Not all symbols from the object sources will be exported by default
CoreDbAccount, CoreDbAccount,
CoreDbApiError, CoreDbApiError,
CoreDbCaptFlags,
CoreDbErrorCode, CoreDbErrorCode,
CoreDbErrorRef, CoreDbErrorRef,
CoreDbCaptRef,
CoreDbKvtRef,
CoreDbMptRef,
CoreDbPhkRef,
CoreDbRef, CoreDbRef,
CoreDbTxID,
CoreDbTxRef,
CoreDbType, CoreDbType,
CoreDbVidRef, CoreDbVidRef,
CoreDxAccRef, CoreDxAccRef,
@ -45,26 +49,26 @@ export
`$$`, `$$`,
backend, backend,
beginTransaction, beginTransaction,
capture,
commit, commit,
compensateLegacySetup, compensateLegacySetup,
contains,
del, del,
delete, delete,
dispose, dispose,
fetch, fetch,
fetchOrEmpty,
finish, finish,
get, get,
getOrEmpty,
getRoot, getRoot,
getTransactionID, getTransactionID,
hash, hash,
hasKey,
hashOrEmpty, hashOrEmpty,
hasPath,
isLegacy, isLegacy,
isPruning, isPruning,
kvt,
logDb, logDb,
merge, merge,
mptPrune,
newAccMpt, newAccMpt,
newCapture, newCapture,
newKvt, newKvt,
@ -72,13 +76,11 @@ export
newTransaction, newTransaction,
pairs, pairs,
parent, parent,
phkPrune,
put, put,
recast, recast,
recorder, recorder,
replicate, replicate,
rollback, rollback,
rootHash,
rootVid, rootVid,
safeDispose, safeDispose,
setTransactionID, setTransactionID,
@ -87,6 +89,27 @@ export
toPhk, toPhk,
toTransactionID toTransactionID
when ProvideCoreDbLegacyAPI:
type
CoreDyTxID = CoreDxTxID|CoreDbTxID
export
CoreDbCaptFlags,
CoreDbCaptRef,
CoreDbKvtRef,
CoreDbMptRef,
CoreDbPhkRef,
CoreDbTxID,
CoreDbTxRef,
capture,
contains,
kvt,
mptPrune,
phkPrune,
rootHash
else:
type
CoreDyTxID = CoreDxTxID
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Public constructor # Public constructor
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
@ -120,7 +143,7 @@ proc newCoreDbRef*(
# Public template wrappers # Public template wrappers
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
template shortTimeReadOnly*(id: CoreDxTxID|CoreDbTxID; body: untyped) = template shortTimeReadOnly*(id: CoreDyTxID; body: untyped) =
proc action() = proc action() =
body body
id.shortTimeReadOnly action id.shortTimeReadOnly action

View File

@ -16,7 +16,8 @@ import
std/[algorithm, sequtils, sets, strutils, tables, times], std/[algorithm, sequtils, sets, strutils, tables, times],
chronos, chronos,
eth/[common, trie/nibbles], eth/[common, trie/nibbles],
stew/results, stew/byteutils,
results,
"../.."/[constants, range_desc], "../.."/[constants, range_desc],
"."/[hexary_desc, hexary_error] "."/[hexary_desc, hexary_error]
@ -69,7 +70,7 @@ proc ppImpl(key: RepairKey; db: HexaryTreeDbRef): string =
return db.keyPp(key) return db.keyPp(key)
except CatchableError: except CatchableError:
discard discard
key.ByteArray33.toSeq.mapIt(it.toHex(2)).join.toLowerAscii key.ByteArray33.toSeq.toHex.toLowerAscii
proc ppImpl(key: NodeKey; db: HexaryTreeDbRef): string = proc ppImpl(key: NodeKey; db: HexaryTreeDbRef): string =
key.to(RepairKey).ppImpl(db) key.to(RepairKey).ppImpl(db)
@ -563,6 +564,9 @@ proc pp*(db: HexaryTreeDbRef; root: NodeKey; indent=4): string =
## Dump the entries from the a generic repair tree. ## Dump the entries from the a generic repair tree.
db.pp(root, indent.toPfx) db.pp(root, indent.toPfx)
proc pp*(db: HexaryTreeDbRef; root: Hash256; indent=4): string =
## Dump the entries from the a generic repair tree.
db.pp(root.to(NodeKey), indent.toPfx)
proc pp*(m: Moment): string = proc pp*(m: Moment): string =
## Prints a moment in time similar to *chronicles* time format. ## Prints a moment in time similar to *chronicles* time format.

View File

@ -8,23 +8,24 @@
# at your option. This file may not be copied, modified, or distributed except # at your option. This file may not be copied, modified, or distributed except
# according to those terms. # according to those terms.
{.push raises: [].}
import import
std/[math, times, strutils], std/[math, times, strutils],
eth/[rlp, common/eth_types_rlp], eth/[rlp, common/eth_types_rlp],
stew/byteutils, stew/byteutils,
nimcrypto, nimcrypto,
results,
../db/core_db, ../db/core_db,
../constants ../constants
export eth_types_rlp export eth_types_rlp
{.push raises: [].}
proc calcRootHash[T](items: openArray[T]): Hash256 {.gcsafe.} = proc calcRootHash[T](items: openArray[T]): Hash256 {.gcsafe.} =
var tr = newCoreDbRef(LegacyDbMemory).mptPrune let sig = merkleSignBegin()
for i, t in items: for i, t in items:
tr.put(rlp.encode(i), rlp.encode(t)) sig.merkleSignAdd(rlp.encode(i), rlp.encode(t))
return tr.rootHash sig.merkleSignCommit.value.to(Hash256)
template calcTxRoot*(transactions: openArray[Transaction]): Hash256 = template calcTxRoot*(transactions: openArray[Transaction]): Hash256 =
calcRootHash(transactions) calcRootHash(transactions)

View File

@ -13,6 +13,7 @@ cliBuilder:
import ./test_code_stream, import ./test_code_stream,
./test_accounts_cache, ./test_accounts_cache,
./test_aristo, ./test_aristo,
./test_coredb,
./test_custom_network, ./test_custom_network,
./test_sync_snap, ./test_sync_snap,
./test_rocksdb_timing, ./test_rocksdb_timing,

View File

@ -13,10 +13,11 @@
## ---------------------------------------------------- ## ----------------------------------------------------
import import
std/[tables], std/tables,
./pp_light, eth/common,
stew/byteutils,
../../nimbus/common/chain_config, ../../nimbus/common/chain_config,
eth/common ./pp_light
export export
pp_light pp_light
@ -26,16 +27,16 @@ export
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc pp*(b: Blob): string = proc pp*(b: Blob): string =
b.mapIt(it.toHex(2)).join.toLowerAscii.pp(hex = true) b.toHex.pp(hex = true)
proc pp*(a: EthAddress): string = proc pp*(a: EthAddress): string =
a.mapIt(it.toHex(2)).join[32 .. 39].toLowerAscii a.toHex[32 .. 39]
proc pp*(a: openArray[EthAddress]): string = proc pp*(a: openArray[EthAddress]): string =
"[" & a.mapIt(it.pp).join(" ") & "]" "[" & a.mapIt(it.pp).join(" ") & "]"
proc pp*(a: BlockNonce): string = proc pp*(a: BlockNonce): string =
a.mapIt(it.toHex(2)).join.toLowerAscii a.toHex
proc pp*(h: BlockHeader; sep = " "): string = proc pp*(h: BlockHeader; sep = " "): string =
"" & "" &

View File

@ -107,7 +107,7 @@ proc pp*(q: openArray[int]; itemsPerLine: int; lineSep: string): string =
proc pp*(a: MDigest[256]; collapse = true): string = proc pp*(a: MDigest[256]; collapse = true): string =
if not collapse: if not collapse:
a.data.toHex.toLowerAscii a.data.toHex
elif a == ZERO_HASH256: elif a == ZERO_HASH256:
"ZERO_HASH256" "ZERO_HASH256"
elif a == EMPTY_ROOT_HASH: elif a == EMPTY_ROOT_HASH:
@ -119,7 +119,7 @@ proc pp*(a: MDigest[256]; collapse = true): string =
elif a == ZERO_HASH256: elif a == ZERO_HASH256:
"ZERO_HASH256" "ZERO_HASH256"
else: else:
a.data.toHex.join[56 .. 63].toLowerAscii a.data.toHex.join[56 .. 63]
proc pp*(a: openArray[MDigest[256]]; collapse = true): string = proc pp*(a: openArray[MDigest[256]]; collapse = true): string =
"@[" & a.toSeq.mapIt(it.pp).join(" ") & "]" "@[" & a.toSeq.mapIt(it.pp).join(" ") & "]"
@ -133,7 +133,7 @@ proc pp*(q: openArray[byte]; noHash = false): string =
for n in 0..31: a[n] = q[n] for n in 0..31: a[n] = q[n]
MDigest[256](data: a).pp MDigest[256](data: a).pp
else: else:
q.toHex.toLowerAscii.pp(hex = true) q.toHex.pp(hex = true)
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Elapsed time pretty printer # Elapsed time pretty printer

View File

@ -9,13 +9,15 @@
# according to those terms. # according to those terms.
import import
std/[os, sequtils, strformat, strutils], std/[os, strformat, strutils],
eth/common, eth/common,
nimcrypto/utils,
stew/byteutils, stew/byteutils,
../../nimbus/sync/[protocol, snap/range_desc], ../../nimbus/sync/[protocol, snap/range_desc],
./gunzip ./gunzip
import
nimcrypto/utils except toHex
type type
UndumpState = enum UndumpState = enum
UndumpHeader UndumpHeader
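
Keeping `nimcrypto/utils` in its own `except toHex` import leaves byteutils' `toHex` as the only candidate at the call sites below. A minimal sketch of the ambiguity this sidesteps (both modules export a byte-array `toHex`):

    import stew/byteutils                  # wanted: lower-case `toHex`
    import nimcrypto/utils except toHex    # the rest of nimcrypto's helpers

    echo @[0x12'u8, 0xAB].toHex            # resolves to byteutils: "12ab"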
@ -66,16 +68,16 @@ proc dumpAccounts*(
): string = ): string =
## Dump accounts data in parseable Ascii text ## Dump accounts data in parseable Ascii text
proc ppStr(blob: Blob): string = proc ppStr(blob: Blob): string =
blob.mapIt(it.toHex(2)).join.toLowerAscii blob.toHex
proc ppStr(proof: SnapProof): string = proc ppStr(proof: SnapProof): string =
proof.to(Blob).ppStr proof.to(Blob).ppStr
proc ppStr(hash: Hash256): string = proc ppStr(hash: Hash256): string =
hash.data.mapIt(it.toHex(2)).join.toLowerAscii hash.data.toHex
proc ppStr(key: NodeKey): string = proc ppStr(key: NodeKey): string =
key.ByteArray32.mapIt(it.toHex(2)).join.toLowerAscii key.ByteArray32.toHex
result = "accounts " & $data.accounts.len & " " & $data.proof.len & "\n" result = "accounts " & $data.accounts.len & " " & $data.proof.len & "\n"

View File

@ -9,13 +9,15 @@
# according to those terms. # according to those terms.
import import
std/[os, sequtils, strformat, strutils], std/[os, strformat, strutils],
eth/common, eth/common,
nimcrypto/utils,
stew/byteutils, stew/byteutils,
../../nimbus/sync/[protocol, snap/range_desc], ../../nimbus/sync/[protocol, snap/range_desc],
./gunzip ./gunzip
import
nimcrypto/utils except toHex
type type
UndumpState = enum UndumpState = enum
UndumpStoragesHeader UndumpStoragesHeader
@ -66,16 +68,16 @@ proc dumpStorages*(
): string = ): string =
## Dump account and storage data in parseable Ascii text ## Dump account and storage data in parseable Ascii text
proc ppStr(blob: Blob): string = proc ppStr(blob: Blob): string =
blob.mapIt(it.toHex(2)).join.toLowerAscii blob.toHex
proc ppStr(proof: SnapProof): string = proc ppStr(proof: SnapProof): string =
proof.to(Blob).ppStr proof.to(Blob).ppStr
proc ppStr(hash: Hash256): string = proc ppStr(hash: Hash256): string =
hash.data.mapIt(it.toHex(2)).join.toLowerAscii hash.data.toHex
proc ppStr(key: NodeKey): string = proc ppStr(key: NodeKey): string =
key.ByteArray32.mapIt(it.toHex(2)).join.toLowerAscii key.ByteArray32.toHex
result = "storages " & $data.storages.len & " " & $data.proof.len & "\n" result = "storages " & $data.storages.len & " " & $data.proof.len & "\n"
result &= root.ppStr & "\n" result &= root.ppStr & "\n"

View File

@ -92,6 +92,9 @@ proc miscRunner(
test "Multi instances transactions": test "Multi instances transactions":
check noisy.testTxSpanMultiInstances() check noisy.testTxSpanMultiInstances()
test "Short keys and other patholgical cases":
check noisy.testShortKeys()
proc accountsRunner( proc accountsRunner(
noisy = true; noisy = true;

View File

@ -26,7 +26,7 @@ import
aristo_init/memory_db, aristo_init/memory_db,
aristo_init/rocks_db, aristo_init/rocks_db,
aristo_persistent, aristo_persistent,
aristo_transcode, aristo_blobify,
aristo_vid], aristo_vid],
../replay/xcheck, ../replay/xcheck,
./test_helpers ./test_helpers
@ -45,8 +45,8 @@ func hash(filter: FilterRef): Hash =
## ##
var h = BlindHash var h = BlindHash
if not filter.isNil: if not filter.isNil:
h = h !& filter.src.ByteArray32.hash h = h !& filter.src.hash
h = h !& filter.trg.ByteArray32.hash h = h !& filter.trg.hash
for w in filter.vGen.vidReorg: for w in filter.vGen.vidReorg:
h = h !& w.uint64.hash h = h !& w.uint64.hash
@ -56,7 +56,7 @@ func hash(filter: FilterRef): Hash =
h = h !& (w.uint64.toBytesBE.toSeq & data).hash h = h !& (w.uint64.toBytesBE.toSeq & data).hash
for w in filter.kMap.keys.toSeq.mapIt(it.uint64).sorted.mapIt(it.VertexID): for w in filter.kMap.keys.toSeq.mapIt(it.uint64).sorted.mapIt(it.VertexID):
let data = filter.kMap.getOrVoid(w).ByteArray32.toSeq let data = @(filter.kMap.getOrVoid(w))
h = h !& (w.uint64.toBytesBE.toSeq & data).hash h = h !& (w.uint64.toBytesBE.toSeq & data).hash
!$h !$h
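
For readers unfamiliar with the idiom: `!&` folds a value into a running `std/hashes` state and `!$` finalizes it, which is what the function above does field by field. A toy sketch:

    import std/hashes

    type Pt = object
      x, y: int

    func hash(p: Pt): Hash =
      var h: Hash = 0
      h = h !& p.x.hash   # mix each field into the running state
      h = h !& p.y.hash
      !$h                 # finalize, spreading the accumulated bits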
@ -67,7 +67,7 @@ func hash(filter: FilterRef): Hash =
proc mergeData( proc mergeData(
db: AristoDbRef; db: AristoDbRef;
rootKey: HashKey; rootKey: Hash256;
rootVid: VertexID; rootVid: VertexID;
proof: openArray[SnapProof]; proof: openArray[SnapProof];
leafs: openArray[LeafTiePayload]; leafs: openArray[LeafTiePayload];
@ -201,11 +201,11 @@ proc testBackendConsistency*(
): bool = ): bool =
## Import accounts ## Import accounts
var var
filTab: Table[QueueID,Hash] # Filter register filTab: Table[QueueID,Hash] # Filter register
ndb = AristoDbRef() # Reference cache ndb = AristoDbRef() # Reference cache
mdb = AristoDbRef() # Memory backend database mdb = AristoDbRef() # Memory backend database
rdb = AristoDbRef() # Rocks DB backend database rdb = AristoDbRef() # Rocks DB backend database
rootKey = HashKey.default rootKey = Hash256() # Root key
count = 0 count = 0
defer: defer:

View File

@ -19,7 +19,7 @@ import
unittest2, unittest2,
../../nimbus/db/aristo/[ ../../nimbus/db/aristo/[
aristo_check, aristo_debug, aristo_desc, aristo_filter, aristo_get, aristo_check, aristo_debug, aristo_desc, aristo_filter, aristo_get,
aristo_merge, aristo_persistent, aristo_transcode], aristo_merge, aristo_persistent, aristo_blobify],
../../nimbus/db/aristo, ../../nimbus/db/aristo,
../../nimbus/db/aristo/aristo_desc/desc_backend, ../../nimbus/db/aristo/aristo_desc/desc_backend,
../../nimbus/db/aristo/aristo_filter/[filter_fifos, filter_scheduler], ../../nimbus/db/aristo/aristo_filter/[filter_fifos, filter_scheduler],
@ -72,12 +72,13 @@ proc fList(be: BackendRef): seq[(QueueID,FilterRef)] =
check be.kind == BackendMemory or be.kind == BackendRocksDB check be.kind == BackendMemory or be.kind == BackendRocksDB
func ppFil(w: FilterRef; db = AristoDbRef(nil)): string = func ppFil(w: FilterRef; db = AristoDbRef(nil)): string =
proc qq(key: HashKey; db: AristoDbRef): string = proc qq(key: Hash256; db: AristoDbRef): string =
if db.isNil: if db.isNil:
let n = key.to(UInt256) let n = key.to(UInt256)
if n == 0: "£ø" else: "£" & $n if n == 0: "£ø" else: "£" & $n
else: else:
HashLabel(root: VertexID(1), key: key).pp(db) let keyLink = HashKey.fromBytes(key.data).value
HashLabel(root: VertexID(1), key: keyLink).pp(db)
"(" & w.fid.pp & "," & w.src.qq(db) & "->" & w.trg.qq(db) & ")" "(" & w.fid.pp & "," & w.src.qq(db) & "->" & w.trg.qq(db) & ")"
func pp(qf: (QueueID,FilterRef); db = AristoDbRef(nil)): string = func pp(qf: (QueueID,FilterRef); db = AristoDbRef(nil)): string =
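
A `Hash256` always carries exactly 32 bytes, so `HashKey.fromBytes(key.data)` cannot fail here and unwrapping with `.value` is safe; under the new representation only blobs of illegal length would be rejected. A sketch of the conversion as a helper (import paths assumed):

    import
      eth/common,                    # Hash256
      results,                       # `.value` on a Result
      ../../nimbus/db/aristo         # HashKey.fromBytes

    func toHashKey(h: Hash256): HashKey =
      ## 32 bytes in -> always a valid hash key, so `.value` cannot fail.
      HashKey.fromBytes(h.data).value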
@ -376,9 +377,6 @@ proc checkFilterTrancoderOk(
# ------------------------- # -------------------------
func to(fid: FilterID; T: type HashKey): T =
fid.uint64.u256.toBytesBE.T
proc qid2fidFn(be: BackendRef): QuFilMap = proc qid2fidFn(be: BackendRef): QuFilMap =
result = proc(qid: QueueID): FilterID = result = proc(qid: QueueID): FilterID =
let rc = be.getFilFn qid let rc = be.getFilFn qid
@ -414,8 +412,8 @@ proc storeFilter(
let fid = FilterID(serial) let fid = FilterID(serial)
be.storeFilter FilterRef( be.storeFilter FilterRef(
fid: fid, fid: fid,
src: fid.to(HashKey), src: fid.to(Hash256),
trg: (fid-1).to(HashKey)) trg: (fid-1).to(Hash256))
proc fetchDelete( proc fetchDelete(
be: BackendRef; be: BackendRef;
@ -496,7 +494,7 @@ proc validateFifo(
lastFid = FilterID(serial+1) lastFid = FilterID(serial+1)
if hashesOk: if hashesOk:
lastTrg = be.getKeyFn(VertexID(1)).get(otherwise=VOID_HASH_KEY).to(UInt256) lastTrg = be.getKeyFn(VertexID(1)).get(otherwise=HashKey()).to(UInt256)
for chn,fifo in be.fifos: for chn,fifo in be.fifos:
for (qid,filter) in fifo: for (qid,filter) in fifo:
@ -750,8 +748,7 @@ proc testFilterBacklog*(
s &= " n=" & $serial s &= " n=" & $serial
s &= " len=" & $be.filters.len s &= " len=" & $be.filters.len
s &= "" & s &= "" &
" root=" & be.getKeyFn(VertexID(1)) " root=" & be.getKeyFn(VertexID(1)).get(otherwise=VOID_HASH_KEY).pp &
.get(otherwise = VOID_HASH_KEY).pp &
"\n state=" & be.filters.state.pp & "\n state=" & be.filters.state.pp &
"\n fifo=" & be.fifos.pp(db) & "\n fifo=" & be.fifos.pp(db) &
"\n" "\n"

View File

@ -14,19 +14,18 @@ import
eth/common, eth/common,
rocksdb, rocksdb,
../../nimbus/db/aristo/[ ../../nimbus/db/aristo/[
aristo_constants, aristo_debug, aristo_desc, aristo_debug, aristo_desc, aristo_filter/filter_scheduler, aristo_merge],
aristo_filter/filter_scheduler, aristo_merge],
../../nimbus/db/kvstore_rocksdb, ../../nimbus/db/kvstore_rocksdb,
../../nimbus/sync/protocol/snap/snap_types, ../../nimbus/sync/protocol/snap/snap_types,
../test_sync_snap/test_types, ../test_sync_snap/test_types,
../replay/[pp, undump_accounts, undump_storages] ../replay/[pp, undump_accounts, undump_storages]
from ../../nimbus/sync/snap/range_desc from ../../nimbus/sync/snap/range_desc
import NodeKey import NodeKey, ByteArray32
type type
ProofTrieData* = object ProofTrieData* = object
root*: HashKey root*: Hash256
id*: int id*: int
proof*: seq[SnapProof] proof*: seq[SnapProof]
kvpLst*: seq[LeafTiePayload] kvpLst*: seq[LeafTiePayload]
@ -39,24 +38,29 @@ const
# Private helpers # Private helpers
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc toPfx(indent: int): string = func toPfx(indent: int): string =
"\n" & " ".repeat(indent) "\n" & " ".repeat(indent)
proc to(a: NodeKey; T: type HashKey): T = func to(a: NodeKey; T: type UInt256): T =
a.T T.fromBytesBE ByteArray32(a)
func to(a: NodeKey; T: type PathID): T =
a.to(UInt256).to(T)
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Public pretty printing # Public pretty printing
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc pp*( func pp*(
w: ProofTrieData; w: ProofTrieData;
rootID: VertexID; rootID: VertexID;
db: AristoDbRef; db: AristoDbRef;
indent = 4; indent = 4;
): string = ): string =
let pfx = indent.toPfx let
result = "(" & HashLabel(root: rootID, key: w.root).pp(db) pfx = indent.toPfx
rootLink = w.root.to(HashKey)
result = "(" & HashLabel(root: rootID, key: rootLink).pp(db)
result &= "," & $w.id & ",[" & $w.proof.len & "]," result &= "," & $w.id & ",[" & $w.proof.len & "],"
result &= pfx & " [" result &= pfx & " ["
for n,kvp in w.kvpLst: for n,kvp in w.kvpLst:
@ -99,24 +103,36 @@ proc say*(noisy = false; pfx = "***"; args: varargs[string, `$`]) =
# Public helpers # Public helpers
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc `==`*[T: AristoError|VertexID](a: T, b: int): bool = func `==`*[T: AristoError|VertexID](a: T, b: int): bool =
a == T(b) a == T(b)
proc `==`*(a: (VertexID,AristoError), b: (int,int)): bool = func `==`*(a: (VertexID,AristoError), b: (int,int)): bool =
(a[0].int,a[1].int) == b (a[0].int,a[1].int) == b
proc `==`*(a: (VertexID,AristoError), b: (int,AristoError)): bool = func `==`*(a: (VertexID,AristoError), b: (int,AristoError)): bool =
(a[0].int,a[1]) == b (a[0].int,a[1]) == b
proc `==`*(a: (int,AristoError), b: (int,int)): bool = func `==`*(a: (int,AristoError), b: (int,int)): bool =
(a[0],a[1].int) == b (a[0],a[1].int) == b
proc `==`*(a: (int,VertexID,AristoError), b: (int,int,int)): bool = func `==`*(a: (int,VertexID,AristoError), b: (int,int,int)): bool =
(a[0], a[1].int, a[2].int) == b (a[0], a[1].int, a[2].int) == b
proc `==`*(a: (QueueID,Hash), b: (int,Hash)): bool = func `==`*(a: (QueueID,Hash), b: (int,Hash)): bool =
(a[0].int,a[1]) == b (a[0].int,a[1]) == b
func to*(a: Hash256; T: type UInt256): T =
T.fromBytesBE a.data
func to*(a: Hash256; T: type PathID): T =
a.to(UInt256).to(T)
func to*(a: HashKey; T: type UInt256): T =
T.fromBytesBE 0u8.repeat(32 - a.len) & @a
func to*(fid: FilterID; T: type Hash256): T =
result.data = fid.uint64.u256.toBytesBE
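
The `HashKey` payload may be shorter than 32 bytes under the new representation, hence the zero-padding before the big-endian read in `to(UInt256)` above. A worked example with a hypothetical 2-byte key payload:

    import std/sequtils
    import stint

    let short = @[0x27'u8, 0x10]                      # 2-byte key payload
    let padded = 0u8.repeat(32 - short.len) & short   # left-pad to 32 bytes
    doAssert UInt256.fromBytesBE(padded) == 10000.u256   # 0x2710 == 10000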
proc to*(sample: AccountsSample; T: type seq[UndumpAccounts]): T = proc to*(sample: AccountsSample; T: type seq[UndumpAccounts]): T =
## Convert test data into usable in-memory format ## Convert test data into usable in-memory format
let file = sample.file.findFilePath.value let file = sample.file.findFilePath.value
@ -149,10 +165,10 @@ proc to*(sample: AccountsSample; T: type seq[UndumpStorages]): T =
break break
result.add w result.add w
proc to*(ua: seq[UndumpAccounts]; T: type seq[ProofTrieData]): T = func to*(ua: seq[UndumpAccounts]; T: type seq[ProofTrieData]): T =
var (rootKey, rootVid) = (VOID_HASH_KEY, VertexID(0)) var (rootKey, rootVid) = (Hash256(), VertexID(0))
for w in ua: for w in ua:
let thisRoot = w.root.to(HashKey) let thisRoot = w.root
if rootKey != thisRoot: if rootKey != thisRoot:
(rootKey, rootVid) = (thisRoot, VertexID(rootVid.uint64 + 1)) (rootKey, rootVid) = (thisRoot, VertexID(rootVid.uint64 + 1))
if 0 < w.data.accounts.len: if 0 < w.data.accounts.len:
@ -162,14 +178,14 @@ proc to*(ua: seq[UndumpAccounts]; T: type seq[ProofTrieData]): T =
kvpLst: w.data.accounts.mapIt(LeafTiePayload( kvpLst: w.data.accounts.mapIt(LeafTiePayload(
leafTie: LeafTie( leafTie: LeafTie(
root: rootVid, root: rootVid,
path: it.accKey.to(HashKey).to(PathID)), path: it.accKey.to(PathID)),
payload: PayloadRef(pType: RawData, rawBlob: it.accBlob)))) payload: PayloadRef(pType: RawData, rawBlob: it.accBlob))))
proc to*(us: seq[UndumpStorages]; T: type seq[ProofTrieData]): T = func to*(us: seq[UndumpStorages]; T: type seq[ProofTrieData]): T =
var (rootKey, rootVid) = (VOID_HASH_KEY, VertexID(0)) var (rootKey, rootVid) = (Hash256(), VertexID(0))
for n,s in us: for n,s in us:
for w in s.data.storages: for w in s.data.storages:
let thisRoot = w.account.storageRoot.to(HashKey) let thisRoot = w.account.storageRoot
if rootKey != thisRoot: if rootKey != thisRoot:
(rootKey, rootVid) = (thisRoot, VertexID(rootVid.uint64 + 1)) (rootKey, rootVid) = (thisRoot, VertexID(rootVid.uint64 + 1))
if 0 < w.data.len: if 0 < w.data.len:
@ -179,12 +195,12 @@ proc to*(us: seq[UndumpStorages]; T: type seq[ProofTrieData]): T =
kvpLst: w.data.mapIt(LeafTiePayload( kvpLst: w.data.mapIt(LeafTiePayload(
leafTie: LeafTie( leafTie: LeafTie(
root: rootVid, root: rootVid,
path: it.slotHash.to(HashKey).to(PathID)), path: it.slotHash.to(PathID)),
payload: PayloadRef(pType: RawData, rawBlob: it.slotData)))) payload: PayloadRef(pType: RawData, rawBlob: it.slotData))))
if 0 < result.len: if 0 < result.len:
result[^1].proof = s.data.proof result[^1].proof = s.data.proof
proc mapRootVid*( func mapRootVid*(
a: openArray[LeafTiePayload]; a: openArray[LeafTiePayload];
toVid: VertexID; toVid: VertexID;
): seq[LeafTiePayload] = ): seq[LeafTiePayload] =

View File

@ -20,7 +20,7 @@ import
unittest2, unittest2,
../../nimbus/db/aristo, ../../nimbus/db/aristo,
../../nimbus/db/aristo/[ ../../nimbus/db/aristo/[
aristo_debug, aristo_desc, aristo_transcode, aristo_vid], aristo_check, aristo_debug, aristo_desc, aristo_blobify, aristo_vid],
../../nimbus/db/aristo/aristo_filter/filter_scheduler, ../../nimbus/db/aristo/aristo_filter/filter_scheduler,
../replay/xcheck, ../replay/xcheck,
./test_helpers ./test_helpers
@ -457,6 +457,55 @@ proc testQidScheduler*(
true true
proc testShortKeys*(
noisy = true;
): bool =
## Check for some pathological cases
func x(s: string): Blob = s.hexToSeqByte
func k(s: string): HashKey = HashKey.fromBytes(s.x).value
let samples = [
# From InvalidBlocks/bc4895-withdrawals/twoIdenticalIndex.json
[("80".x,
"da808094c94f5374fce5edbc8e2a8697c15331677e6ebf0b822710".x,
"27f166f1d7c789251299535cb176ba34116e44894476a7886fe5d73d9be5c973".k),
("01".x,
"da028094c94f5374fce5edbc8e2a8697c15331677e6ebf0b822710".x,
"81eac5f476f48feb289af40ee764015f6b49036760438ea45df90d5342b6ae61".k),
("02".x,
"da018094c94f5374fce5edbc8e2a8697c15331677e6ebf0b822710".x,
"463769ae507fcc6d6231c8888425191c5622f330fdd4b78a7b24c4521137b573".k),
("03".x,
"da028094c94f5374fce5edbc8e2a8697c15331677e6ebf0b822710".x,
"a95b9a7b58a6b3cb4001eb0be67951c5517141cb0183a255b5cae027a7b10b36".k)]]
for n,sample in samples:
let sig = merkleSignBegin()
var inx = -1
for (k,v,r) in sample:
inx.inc
sig.merkleSignAdd(k,v)
false.say "*** testShortkeys (1)", "n=", n, " inx=", inx,
"\n k=", k.toHex, " v=", v.toHex,
"\n r=", r.pp(sig),
"\n ", sig.pp(),
"\n"
let w = sig.merkleSignCommit().value
false.say "*** testShortkeys (2)", "n=", n, " inx=", inx,
"\n k=", k.toHex, " v=", v.toHex,
"\n r=", r.pp(sig),
"\n R=", w.pp(sig),
"\n ", sig.pp(),
"\n",
"\n ----------------",
"\n"
let rc = sig.db.check
xCheckRc rc.error == (0,0)
xCheck r == w
true
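
The sample records are deliberately tiny; a quick size check on the first value (standalone sketch) shows why they probe the compact-node representation:

    import stew/byteutils

    let v = "da808094c94f5374fce5edbc8e2a8697c15331677e6ebf0b822710".hexToSeqByte
    doAssert v.len == 27   # 0xda = 0xc0 + 26: list header plus 26 payload bytes
    # Anything encoding to fewer than 32 bytes is stored inline in the parent
    # node rather than linked by Keccak hash -- the edge case exercised above.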
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# End # End
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------

View File

@ -388,7 +388,7 @@ proc testTxMergeProofAndKvpList*(
var var
db = AristoDbRef() db = AristoDbRef()
tx = AristoTxRef(nil) tx = AristoTxRef(nil)
rootKey: HashKey rootKey: Hash256
count = 0 count = 0
defer: defer:
db.finish(flush=true) db.finish(flush=true)

View File

@ -17,7 +17,7 @@ import
eth/common, eth/common,
results, results,
unittest2, unittest2,
../../nimbus/db/[core_db/persistent, ledger], ../../nimbus/db/core_db/persistent,
../../nimbus/core/chain, ../../nimbus/core/chain,
./replay/pp, ./replay/pp,
./test_coredb/[coredb_test_xx, test_chainsync] ./test_coredb/[coredb_test_xx, test_chainsync]
@ -102,7 +102,7 @@ proc openLegacyDB(
# Test Runners: accounts and accounts storages # Test Runners: accounts and accounts storages
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc legacyRunner( proc chainSyncRunner(
noisy = true; noisy = true;
capture = bChainCapture; capture = bChainCapture;
persistent = true; persistent = true;
@ -120,7 +120,7 @@ proc legacyRunner(
defer: defer:
if persistent: baseDir.flushDbDir if persistent: baseDir.flushDbDir
suite "Legacy DB: test Core API interfaces"& suite "CoreDB and LedgerRef API"&
&", capture={fileInfo}, {sayPersistent}": &", capture={fileInfo}, {sayPersistent}":
test &"Ledger API, {numBlocksInfo} blocks": test &"Ledger API, {numBlocksInfo} blocks":
@ -137,7 +137,7 @@ proc legacyRunner(
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc coreDbMain*(noisy = defined(debug)) = proc coreDbMain*(noisy = defined(debug)) =
noisy.legacyRunner() noisy.chainSyncRunner()
when isMainModule: when isMainModule:
const const
@ -155,7 +155,7 @@ when isMainModule:
testList = @[bulkTest2, bulkTest3] testList = @[bulkTest2, bulkTest3]
for n,capture in testList: for n,capture in testList:
noisy.legacyRunner(capture=capture, persistent=persDb) noisy.chainSyncRunner(capture=capture, persistent=persDb)
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# End # End